diff --git a/.gitignore b/.gitignore
index 5e1669b687d12ede6f94a5033432c1dcc799c299..985d6989e02f7583ec1b72f32bde79147d615d2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+*.DS_Store
.vscode/
/doc/fluid/menu.zh.json
/doc/fluid/menu.en.json
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e06e02f312e8c594a28249e7e9d32eb5a60bf7e9
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,42 @@
+- repo: https://github.com/pre-commit/mirrors-yapf.git
+ sha: v0.16.0
+ hooks:
+ - id: yapf
+ files: \.py$
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ sha: a11d9314b22d8f8c7556443875b731ef05965464
+ hooks:
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: detect-private-key
+ files: (?!.*paddle)^.*$
+ - id: end-of-file-fixer
+ files: \.md$
+ - id: trailing-whitespace
+ files: \.md$
+- repo: https://github.com/Lucas-C/pre-commit-hooks
+ sha: v1.0.1
+ hooks:
+ - id: forbid-crlf
+ files: \.md$
+ - id: remove-crlf
+ files: \.md$
+ - id: forbid-tabs
+ files: \.md$
+ - id: remove-tabs
+ files: \.md$
+- repo: https://github.com/reyoung/pre-commit-hooks-jinja-compile.git
+ sha: 4a369cc72a4a2b8d3813ab8cc17abb5f5b21ef6c
+ hooks:
+ - id: convert-jinja2-into-html
+ # The arguments replace a filename matching the pattern `.*/([^/]*)\.tmpl` with `\1`
+ args: ['--filename_pattern=.*/([^/]*)\.tmpl', '--filename_repl=\1']
+- repo: local
+ hooks:
+ - id: convert-markdown-into-html
+ name: convert-markdown-into-html
+ description: Convert README.md into index.html and README.cn.md into index.cn.html
+ entry: python .pre-commit-hooks/convert_markdown_into_html.py
+ language: system
+ files: .+README(\.cn)?\.md$
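+ # The files regex above matches every README.md / README.cn.md in the repo,
+ # so a rendered index.html / index.cn.html is regenerated for each at commit time.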
+
diff --git a/.pre-commit-hooks/convert_markdown_into_html.py b/.pre-commit-hooks/convert_markdown_into_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f1db82b3ff085cc2e027bfc024d1bb8d87d6be8
--- /dev/null
+++ b/.pre-commit-hooks/convert_markdown_into_html.py
@@ -0,0 +1,95 @@
+import argparse
+import re
+import sys
+
+HEAD = """
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+TAIL = """
+
+
+
+
+
+"""
+
+
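+# Wrap each Markdown file passed on the command line with the HEAD/TAIL HTML
+# templates above, writing the result next to the source file:
+# README.md -> index.html, README.cn.md -> index.cn.html.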
+def convert_markdown_into_html(argv=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('filenames', nargs='*', help='Filenames to fix')
+ args = parser.parse_args(argv)
+
+ retv = 0
+
+ for filename in args.filenames:
+ with open(
+ re.sub(r"README", "index", re.sub(r"\.md$", ".html",
+ filename)), "w") as output:
+ output.write(HEAD)
+ with open(filename) as input:
+ for line in input:
+ output.write(line)
+ output.write(TAIL)
+
+ return retv
+
+
+if __name__ == '__main__':
+ sys.exit(convert_markdown_into_html())
diff --git a/.pre-commit-hooks/convert_markdown_into_ipynb.sh b/.pre-commit-hooks/convert_markdown_into_ipynb.sh
new file mode 100644
index 0000000000000000000000000000000000000000..dbcb1046d82010f776792287b45abdebf5b097ee
--- /dev/null
+++ b/.pre-commit-hooks/convert_markdown_into_ipynb.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
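+# Convert every Markdown file given as an argument into a Jupyter notebook with
+# the same basename (foo.md -> foo.ipynb) using the markdown-to-ipynb tool,
+# aborting on the first conversion failure.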
+for file in "$@" ; do
+ markdown-to-ipynb < "$file" > "${file%.*}.ipynb"
+ if [ $? -ne 0 ]; then
+ echo >&2 "markdown-to-ipynb $file error"
+ exit 1
+ fi
+done
+
diff --git a/ci_scripts/api_white_list.txt b/ci_scripts/api_white_list.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a50852af163ac286d3e8cba504078e746783fd79
--- /dev/null
+++ b/ci_scripts/api_white_list.txt
@@ -0,0 +1,10 @@
+paddle/fluid/DistributeTranspiler_cn.rst
+paddle/fluid/DistributeTranspilerConfig_cn.rst
+paddle/fluid/transpiler/HashName_cn.rst
+paddle/fluid/memory_optimize_cn.rst
+paddle/fluid/release_memory_cn.rst
+paddle/optimizer/Dpsgd_cn.rst
+paddle/reader/ComposeNotAligned_cn.rst
+paddle/fluid/layers/scatter_cn.rst
+paddle/tensor/manipulation/scatter_cn.rst
+paddle/distributed/fleet/Fleet_cn.rst
diff --git a/ci_scripts/check_api_cn.sh b/ci_scripts/check_api_cn.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c2cd521f9376d5e2bc0a255b6a8d47bf183a5ad6
--- /dev/null
+++ b/ci_scripts/check_api_cn.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
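+# For every file changed relative to upstream/$BRANCH that contains a
+# "code-block" directive and is a Chinese API doc under doc/paddle/api/paddle
+# (and is not listed in api_white_list.txt), run its sample code through
+# chinese_samplecode_processor.py and fail the check if any sample fails.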
+git_files=`git diff --numstat upstream/$BRANCH | awk '{print $NF}'`
+
+for file in `echo $git_files`;do
+ grep "code-block" ../$file
+ if [ $? -eq 0 ] ;then
+ echo $file | grep "doc/paddle/api/paddle/.*_cn.rst"
+ if [ $? -eq 0 ];then
+ api_file=`echo $file | sed 's#doc/paddle/api/##g'`
+ grep -w "${api_file}" ${DIR_PATH}/api_white_list.txt
+ if [ $? -ne 0 ];then
+ python chinese_samplecode_processor.py ../$file
+ if [ $? -ne 0 ];then
+ echo "chinese sample code failed"
+ exit 5
+ fi
+ fi
+ fi
+ fi
+done
+
diff --git a/ci_scripts/check_code.sh b/ci_scripts/check_code.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3841b912fbdce3fe4343056ce69e46aaece9c5e2
--- /dev/null
+++ b/ci_scripts/check_code.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#=================================================
+# Utils
+#=================================================
+
+set -ex
+
+if [ -z ${BRANCH} ]; then
+ BRANCH="develop"
+fi
+
+BENCHMARK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )"
+echo ${BENCHMARK_ROOT}
+
+function prepare_env(){
+ # Install pre-commit, pylint and pytest
+ pip install pre-commit==1.21 pylint==1.9.5 pytest==4.6.9
+}
+
+function abort(){
+ echo "Your change doesn't follow benchmark's code style." 1>&2
+ echo "Please use pre-commit to check what is wrong." 1>&2
+ exit 1
+}
+
+
+function check_style(){
+ trap 'abort' 0
+ pre-commit install
+ commit_files=on
+ for file_name in `git diff --numstat upstream/$BRANCH| awk '{print $NF}'`;do
+ if ! pre-commit run --files ../$file_name ; then
+ git diff
+ commit_files=off
+ fi
+ done
+ if [ $commit_files == 'off' ];then
+ echo "code format error"
+ exit 1
+ fi
+ trap 0
+}
+
+prepare_env
+check_style
diff --git a/ci_scripts/check_pr_approval.py b/ci_scripts/check_pr_approval.py
new file mode 100644
index 0000000000000000000000000000000000000000..937b0be7562fab93157c16b942631f0a580dfc68
--- /dev/null
+++ b/ci_scripts/check_pr_approval.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+import json
+
+
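+# Read the GitHub "reviews" API response (a JSON array) from stdin, count the
+# APPROVED reviews whose reviewer id is in required_reviewers, and print TRUE
+# if at least `count` of those required reviewers approved, FALSE otherwise.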
+def check_approval(count, required_reviewers):
+ json_buff = ""
+ for line in sys.stdin:
+ json_buff = "".join([json_buff, line])
+ json_resp = json.loads(json_buff)
+ approves = 0
+ approved_user_ids = []
+ for review in json_resp:
+ if review["state"] == "APPROVED":
+ approves += 1
+ approved_user_ids.append(review["user"]["id"])
+
+ # convert to int
+ required_reviewers_int = set()
+ for rr in required_reviewers:
+ required_reviewers_int.add(int(rr))
+
+ if len(set(approved_user_ids) & required_reviewers_int) >= count:
+ print("TRUE")
+ else:
+ print("FALSE")
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1 and sys.argv[1].isdigit():
+ check_approval(int(sys.argv[1]), sys.argv[2:])
+ else:
+ print(
+ "Usage: python check_pr_approval.py [count] [required reviewer id] ..."
+ )
diff --git a/ci_scripts/checkapproval.sh b/ci_scripts/checkapproval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9bd4b99296598c78df81ec65d533fe8cab4ecdc4
--- /dev/null
+++ b/ci_scripts/checkapproval.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
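+# For each monitored path in API_FILES: if this PR changes files under it,
+# fetch the PR's reviews from the GitHub API and require an approval from one
+# of the listed TPM user ids (checked by check_pr_approval.py), else fail.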
+API_FILES=("doc/paddle/api/paddle")
+
+for API_FILE in ${API_FILES[*]}; do
+ API_CHANGE=`git diff --name-only upstream/$BRANCH | grep "${API_FILE}"`
+ if [ "${API_CHANGE}" ];then
+ approval_line=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/FluidDoc/pulls/${GIT_PR_ID}/reviews?per_page=10000`
+ if [ "${API_FILE}" == "doc/paddle/api/paddle" ];then
+ APPROVALS=`echo ${approval_line} | python ./check_pr_approval.py 1 2870059 27208573 29231 28379894 23093488 11935832`
+ fi
+ fi
+ if [ "${APPROVALS}" == "FALSE" ]; then
+ if [ "${API_FILE}" == "doc/paddle/api/paddle" ];then
+ echo "You must have one TPM (saxon-zh or swtkiwi or jzhang533 or Heeenrrry or dingjiaweiww or TCChenlong) approval for the api change! ${API_FILE} for the management reason of API interface and API document."
+ fi
+ exit 1
+ fi
+done
+
diff --git a/ci_scripts/chinese_samplecode_processor.py b/ci_scripts/chinese_samplecode_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c40523fc1f2c8fbb02985f67c895b5f8b7c6506
--- /dev/null
+++ b/ci_scripts/chinese_samplecode_processor.py
@@ -0,0 +1,234 @@
+import math
+import os
+import pickle
+import shutil
+import subprocess
+import multiprocessing
+import sys
+
+
+def remove_desc_code(srcls, filename):
+ if filename == 'fluid_cn/one_hot_cn.rst':
+ srcls.pop(13)
+ srcls.pop(28)
+ srcls.pop(44)
+ if filename == 'layers_cn/one_hot_cn.rst':
+ srcls.pop(15)
+ srcls.pop(30)
+ srcls.pop(46)
+ if filename == 'profiler_cn/profiler_cn.rst':
+ srcls.pop(41)
+ if filename == 'layers_cn/natural_exp_decay_cn.rst':
+ srcls.pop(13)
+ if filename == 'layers_cn/transpose_cn.rst':
+ srcls.pop(20)
+ if filename == 'layers_cn/array_length_cn.rst':
+ srcls.pop(36)
+ if filename == 'layers_cn/inverse_time_decay_cn.rst':
+ srcls.pop(13)
+ if filename == 'layers_cn/stack_cn.rst':
+ srcls.pop(12)
+ srcls.pop(33)
+ if filename == 'layers_cn/sums_cn.rst':
+ srcls.pop(11)
+ if filename == 'layers_cn/sum_cn.rst':
+ for i in range(len(srcls) - 1, 61, -1):
+ srcls.pop(i)
+ if filename == 'layers_cn/softmax_cn.rst':
+ srcls.pop(30)
+ srcls.pop(57)
+ if filename == 'layers_cn/array_write_cn.rst':
+ srcls.pop(37)
+ if filename == 'layers_cn/lod_append_cn.rst':
+ srcls.pop(11)
+ if filename == 'layers_cn/reorder_lod_tensor_by_rank_cn.rst':
+ srcls.pop(25)
+ if filename == 'layers_cn/round_cn.rst':
+ srcls.pop(10)
+ if filename == 'layers_cn/squeeze_cn.rst':
+ srcls.pop(11)
+ srcls.pop(19)
+ srcls.pop(27)
+ if filename == 'layers_cn/unsqueeze_cn.rst':
+ srcls.pop(11)
+ if filename == 'layers_cn/array_read_cn.rst':
+ srcls.pop(51)
+ if filename == 'layers_cn/scatter_cn.rst':
+ srcls.pop(9)
+ if filename == 'layers_cn/topk_cn.rst':
+ srcls.pop(11)
+ if filename == 'optimizer_cn/ModelAverage_cn.rst':
+ srcls.pop(15)
+ return srcls
+
+
+def check_indent(code_line):
+ indent = ""
+ for c in code_line:
+ if c == '\t':
+ indent += ' '
+ elif c == ' ':
+ indent += ' '
+ if c != ' ' and c != '\t':
+ break
+ return indent
+
+
+def find_all(src_str, substr):
+ indices = []
+ get_one = src_str.find(substr)
+ while get_one != -1:
+ indices.append(get_one)
+ get_one = src_str.find(substr, get_one + 1)
+ return indices
+
+
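+# Scan an .rst file for ".. code-block:: python" sections, extract each sample
+# (stripping the common indentation), run it via run_sample_code, and record the
+# per-sample status list in status_all keyed by filename
+# (-1: no sample code, 0: success, 1: runtime error).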
+def extract_sample_code(srcfile, status_all):
+ filename = srcfile.name
+ srcc = srcfile.read()
+ srcfile.seek(0, 0)
+ srcls = srcfile.readlines()
+ srcls = remove_desc_code(
+ srcls, filename) # remove description info for samplecode
+ status = []
+ sample_code_begins = find_all(srcc, " code-block:: python")
+ if len(sample_code_begins) == 0:
+ status.append(-1)
+
+ else:
+ for i in range(0, len(srcls)):
+ if srcls[i].find(".. code-block:: python") != -1:
+ content = ""
+ start = i
+
+ blank_line = 1
+ while srcls[start + blank_line].strip() == '':
+ blank_line += 1
+
+ startindent = ""
+ # remove indent error
+ if srcls[start + blank_line].find("from") != -1:
+ startindent += srcls[start + blank_line][:srcls[
+ start + blank_line].find("from")]
+ elif srcls[start + blank_line].find("import") != -1:
+ startindent += srcls[start + blank_line][:srcls[
+ start + blank_line].find("import")]
+ else:
+ startindent += check_indent(srcls[start + blank_line])
+ content += srcls[start + blank_line][len(startindent):]
+ for j in range(start + blank_line + 1, len(srcls)):
+ # planish a blank line
+ if not srcls[j].startswith(startindent) and srcls[
+ j] != '\n':
+ break
+ if srcls[j].find(" code-block:: python") != -1:
+ break
+ content += srcls[j].replace(startindent, "", 1)
+ status.append(run_sample_code(content, filename))
+
+ status_all[filename] = status
+ return status_all
+
+
+def run_sample_code(content, filename):
+ # three statuses: -1: no sample code; 1: running error; 0: normal
+ fname = filename.split("/")[-1].replace("_cn", "").replace(".rst",
+ "") + ".py"
+ tempf = open("temp/" + fname, 'w')
+ content = "# -*- coding: utf-8 -*-\n" + content
+ tempf.write(content)
+ tempf.close()
+ cmd = ["python", "temp/" + fname]
+
+ subprc = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ _, error = subprc.communicate()
+ err = "".join(error.decode(encoding='utf-8'))
+
+ if subprc.returncode != 0:
+ print("\nSample code error found in ", filename, ":\n")
+ print(err)
+ status = 1
+ else:
+ status = 0
+ os.remove("temp/" + fname)
+ return status
+
+
+def test(file):
+ temp = []
+ src = open(file, 'r')
+ status_all = {}
+ extract_sample_code(src, status_all)
+ temp.append(status_all)
+ src.close()
+ return temp
+
+
+if os.path.isdir("temp"):
+ shutil.rmtree("temp")
+if os.path.isdir("infer_model"):
+ shutil.rmtree("infer_model")
+if os.path.isdir("image"):
+ shutil.rmtree("image")
+if os.path.isdir("my_paddle_model"):
+ shutil.rmtree("my_paddle_model")
+if os.path.isdir("my_paddle_vars"):
+ shutil.rmtree("my_paddle_vars")
+
+if not os.path.isdir("temp"):
+ os.mkdir("temp")
+
+output = []
+
+if len(sys.argv) < 2:
+ print("Error: inadequate number of arguments")
+ print("Please one file")
+ sys.exit(1)
+else:
+ if not os.path.exists(sys.argv[1]):
+ print("File not found")
+ sys.exit(1)
+ res = test(sys.argv[1])
+ output.append(res)
+
+status_groups = {-1: [], 0: [], 1: []}
+# polishes show format
+ci_pass = True
+for one_file in output:
+ for dicts in one_file:
+ for key in dicts:
+ status = dicts[key]
+ for ele in status:
+ if ele != 0:
+ ci_pass = False
+ break
+ if len(status) == 1:
+ status_groups[status[0]].append(key)
+ else:
+ for u in range(0, len(status)):
+ status_groups[status[u]].append(key + '_' + str(u + 1))
+
+error_api = status_groups[-1] + status_groups[1]
+total_error_number = len(error_api)
+
+print("****************************************************")
+print("----------------End of the Check--------------------")
+print("****************************************************")
+if total_error_number > 0:
+ print("Error sample code number is:{}".format(total_error_number))
+ type_one_number = len(status_groups[-1])
+ type_two_number = len(status_groups[1])
+ if type_one_number > 0:
+ print("Error type one sample number is:{}".format(type_one_number))
+ print("Error raised from type one:no sample code.",
+ str(status_groups[-1]))
+ if type_two_number > 0:
+ print("Error type two sample number is:{}".format(type_two_number))
+ print("Error raised from type two:running error sample code.",
+ str(status_groups[1]))
+if not ci_pass:
+ print("Mistakes found in sample codes.")
+ exit(1)
+else:
+ print("Sample code check is successful!")
diff --git a/ci_scripts/ci_start.sh b/ci_scripts/ci_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d1d2773fd3b4ea354af08a9bd02a2416734c397a
--- /dev/null
+++ b/ci_scripts/ci_start.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+export DIR_PATH=${PWD}
+
+/bin/bash ${DIR_PATH}/check_code.sh
+if [ $? -ne 0 ];then
+ echo "code format error"
+ exit 1
+fi
+
+/bin/bash -x ${DIR_PATH}/check_api_cn.sh
+if [ $? -ne 0 ];then
+ exit 1
+fi
+
+/bin/bash ${DIR_PATH}/checkapproval.sh
\ No newline at end of file
diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md
index 93f4d5cb950a1559928964ffe3de245600e43a6a..d9c1f4f5bd641fe1ca037ee499997cdedbcd408a 100644
--- a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md
+++ b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide.md
@@ -9,7 +9,24 @@
- 通过所有单元测试。
- 请遵守[提交代码的一些约定](#提交代码的一些约定)。
-以下教程将指导您提交代码。
+
+## 使用官方开发镜像(推荐)
+
+```
+# 第一次启动(CPU开发)
+docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
+# 第一次启动(GPU开发)
+nvidia-docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
+# 后面几次启动
+docker exec -it username bash
+```
+
+不同开发者启动docker的命令不一样,以上只是推荐命令。如果使用自己习惯的命令,一定要加参数--privileged(GPU的CUPTI库调用需要)。
+
+**推荐使用官方开发镜像 hub.baidubce.com/paddlepaddle/paddle:latest-dev 提交代码。**
+
+**以下教程将指导您提交代码。**
+
## [Fork](https://help.github.com/articles/fork-a-repo/)
跳转到[PaddlePaddle](https://github.com/PaddlePaddle/Paddle) GitHub首页,然后单击 `Fork` 按钮,生成自己目录下的仓库,比如 。
@@ -42,7 +59,7 @@ Paddle 目前使用[Git流分支模型](http://nvie.com/posts/a-successful-git-b
Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 Git 预提交钩子。 它可以帮助我们格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。
-`pre-commit`测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它:
+`pre-commit`测试是 CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它:
```bash
➜ pip install pre-commit
@@ -51,7 +68,7 @@ Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 G
Paddle 使用 `clang-format` 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。
-注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。
+注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同,Paddle 开发人员使用的是`pip install pre-commit`;使用Paddle docker镜像时自带`pre-commit`,不需要单独安装。
## 开始开发
@@ -66,19 +83,53 @@ Changes not staged for commit:
(use "git add ..." to update what will be committed)
(use "git checkout -- ..." to discard changes in working directory)
- modified: README.md
+ modified: README.md
Untracked files:
(use "git add ..." to include in what will be committed)
- test
+ test
no changes added to commit (use "git add" and/or "git commit -a")
```
-## 编译和单元测试
+## 编译
+
+创建并进入/Paddle/build路径下:
+
+ mkdir -p /Paddle/build && cd /Paddle/build
+
+执行cmake:
+
+
+ * 对于需要编译**CPU版本PaddlePaddle**的用户:
+
+ For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+ For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+
+ * 对于需要编译**GPU版本PaddlePaddle**的用户:
+
+ For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+ For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+
+执行编译:
+
+ make -j$(nproc)
+
+ 如:make -j16,使用16核编译
+
+安装编译好的whl包:进入/Paddle/build/python/dist目录找到生成的.whl包,然后在当前机器或目标机器上安装:
+
+ For Python2: pip install -U(whl包的名字)
+ For Python3: pip3.5 install -U(whl包的名字)
关于编译 PaddlePaddle 的源码,请参见[从源码编译](../../../install/compile/fromsource.html) 选择对应的操作系统。
+
+## 单元测试
+
+ 单测运行(重复运行多次,避免随机失败),如重复运行100次的命令如下:
+ ctest --repeat-until-fail 100 -R test_xx
+
关于单元测试,可参考[Op单元测试](../new_op/new_op.html#id7) 的运行方法。
## 提交(commit)
@@ -92,7 +143,7 @@ On branch test
Untracked files:
(use "git add ..." to include in what will be committed)
- test
+ test
nothing added to commit but untracked files present (use "git add" to track)
➜ git add test
@@ -115,15 +166,6 @@ clang-formater.......................................(no files to check)Skipped
create mode 100644 233
```
- 需要注意的是:您需要在commit中添加说明(commit message)以触发CI单测,写法如下:
-
-```bash
-# 触发develop分支的CI单测
-➜ git commit -m "test=develop"
-
-# 触发release/1.1分支的CI单侧
-➜ git commit -m "test=release/1.1"
-```
## 保持本地仓库最新
@@ -135,8 +177,8 @@ clang-formater.......................................(no files to check)Skipped
➜ git remote
origin
➜ git remote -v
-origin https://github.com/USERNAME/Paddle (fetch)
-origin https://github.com/USERNAME/Paddle (push)
+origin https://github.com/USERNAME/Paddle (fetch)
+origin https://github.com/USERNAME/Paddle (push)
```
这里 origin 是我们 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来我们创建一个原始 Paddle 仓库的远程主机,命名为 upstream。
diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md
index 48a65cb6573fef3aeb6a5cbc7d9b88cb52a7b5bf..3158b23326094b7a2da4f1f87445d6518ea5f57a 100644
--- a/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md
+++ b/doc/fluid/advanced_guide/addon_development/contribute_code/local_dev_guide_en.md
@@ -9,7 +9,22 @@ You will learn how to develop programs in local environment under the guidelines
- Pass through all unit tests.
- Please follow [regulations of submitting codes](#regulations of submitting codes).
-The following guidiance tells you how to submit code.
+## Use the official development image (recommended)
+
+```
+# First start (CPU development)
+docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
+# First start (GPU development)
+nvidia-docker run -it --cpu-shares=20000 --name=username --net=host --privileged --rm -v $(pwd):/Paddle hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash
+# Subsequent starts
+docker exec -it username bash
+```
+Different developers start docker with different commands; the ones above are only recommendations. If you prefer your own command, be sure to add the --privileged parameter (required for GPU CUPTI library calls).
+
+**It is recommended to use the official development image hub.baidubce.com/paddlepaddle/paddle:latest-dev to submit code.**
+
+**The following guidance tells you how to submit code.**
+
## [Fork](https://help.github.com/articles/fork-a-repo/)
Transfer to the home page of Github [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) ,and then click button `Fork` to generate the git under your own file directory,such as 。
@@ -44,7 +59,7 @@ It is worth noting that before the checkout, you need to keep the current branch
Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage Git pre-commit hooks. It helps us format the source code (C++, Python) and automatically check some basic things before committing (such as having only one EOL per file, not adding large files in Git, etc.).
-The `pre-commit` test is part of the unit test in Travis-CI. A PR that does not satisfy the hook cannot be submitted to Paddle. Install `pre-commit` first and then run it in current directory:
+The `pre-commit` test is part of the unit test in CI. A PR that does not satisfy the hook cannot be submitted to Paddle. Install `pre-commit` first and then run it in current directory:
```bash
@@ -54,7 +69,7 @@ The `pre-commit` test is part of the unit test in Travis-CI. A PR that does not
Paddle modify the format of C/C++ source code with `clang-format` .Make sure the version of `clang-format` is above 3.8.
-Note:There are differences between the installation of `yapf` with `pip install pre-commit` and that with `conda install -c conda-forge pre-commit` . Paddle developers use `pip install pre-commit` 。
+Note: The `yapf` installed by `pip install pre-commit` differs slightly from the one installed by `conda install -c conda-forge pre-commit`. Paddle developers use `pip install pre-commit`. The Paddle docker image already ships with `pre-commit`, so no separate installation is needed.
## Start development
@@ -76,7 +91,45 @@ Untracked files:
no changes added to commit (use "git add" and/or "git commit -a")
```
-## Build and test
+## Build
+
+Create and enter the /Paddle/build directory:
+
+ mkdir -p /Paddle/build && cd /Paddle/build
+
+Execute cmake:
+
+
+ * For users who need to compile the **CPU version PaddlePaddle**:
+
+ For Python2: cmake .. -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+ For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+
+
+ * For users who need to compile the **GPU version PaddlePaddle**:
+
+ For Python2: cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+ For Python3: cmake .. -DPY_VERSION=3.5 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
+
+
+Execute compilation:
+
+ make -j$(nproc)
+
+ For example, make -j16 compiles with 16 cores
+
+After compiling successfully, go to the `/Paddle/build/python/dist` directory, find the generated `.whl` package, and install it on the current machine or the target machine:
+
+ For Python2: pip install -U(whl package name)
+ For Python3: pip3.5 install -U(whl package name)
+
+
+## Test
+
+ Run a unit test repeatedly (e.g. 100 times) to avoid random failures:
+ ctest --repeat-until-fail 100 -R test_xx
+
Please refer to [Compile From Source Code](../../../install/compile/fromsource_en.html) about more information of building PaddlePaddle source codes.
Please refer to [Op Unit Tests](../new_op/new_op_en.html#unit-tests) about more information of running unit tests.
@@ -113,14 +166,6 @@ clang-formater.......................................(no files to check)Skipped
create mode 100644 233
```
- Attention needs to be paid:you need to add commit message to trigger CI test.The command is as follows:
-
-```bash
-# Touch CI single test of develop branch
-➜ git commit -m "test=develop"
-# Touch CI single test of release/1.1 branch
-➜ git commit -m "test=release/1.1"
-```
## Keep the latest local repository
diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md
index a43d38ee5470494757f59b94e9248010f1c7c775..06cb8c07caa63adbe3198d69c3727b2a0ba2ba11 100644
--- a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md
+++ b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide.md
@@ -26,7 +26,7 @@
-
+
diff --git a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md
index d71d92e3632d323e95fa45342c822183ab844e45..ee28c9f91eb62e710f58342044277a45898ee7eb 100644
--- a/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md
+++ b/doc/fluid/advanced_guide/addon_development/contribute_code/submit_pr_guide_en.md
@@ -26,7 +26,7 @@ For the first time to submit Pull Request,you need to sign CLA(Contributor Licen
-
+
diff --git a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md
index f56a35f1684504ec7b370342a0dc1ee8613061e3..0a321990f6aff2633c91de366c315d875d9425e5 100644
--- a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md
+++ b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea.md
@@ -56,12 +56,20 @@ blocks中包含:
block的概念与通用程序一致,例如在下列这段C++代码中包含三个block:
``` cpp
-int main(){ //block 0
- int i = 0;
- if (i<10){ //block 1
- for (int j=0;j<10;j++){ //block 2
- }
+#include <iostream>
+
+int main() {
+ int x = 5; // block 0
+ int y = 4; // block 0
+ int out; // block 0
+
+ if (x < y) { // block 0
+ out = 1; // block 1
+ } else {
+ out = 0; // block 2
}
+
+ std::cout << out << std::endl;
return 0;
}
```
@@ -69,27 +77,20 @@ int main(){ //block 0
类似的,在下列 Paddle 的 Program 包含3段block:
```python
-import paddle.fluid as fluid # block 0
-
-limit = fluid.layers.fill_constant_batch_size_like(
- input=label, dtype='int64', shape=[1], value=5.0)
-cond = fluid.layers.less_than(x=label, y=limit)
-
-ie = fluid.layers.IfElse(cond)
-with ie.true_block(): # block 1
- true_image = ie.input(image)
- hidden = fluid.layers.fc(input=true_image, size=100, act='tanh')
- prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
- ie.output(prob)
-
-with ie.false_block(): # block 2
- false_image = ie.input(image)
- hidden = fluid.layers.fc(
- input=false_image, size=200, act='tanh')
- prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
- ie.output(prob)
-
-prob = ie()
+import paddle.fluid as fluid
+
+x = fluid.data(name='x', shape=[1], dtype='int64') # block 0
+y = fluid.data(name='y', shape=[1], dtype='int64') # block 0
+
+def true_block():
+ return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1
+
+def false_block():
+ return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2
+
+condition = fluid.layers.less_than(x, y) # block 0
+
+out = fluid.layers.cond(condition, true_block, false_block) # block 0
```
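+
+上面的 Program 可以按如下方式运行(仅为示意,假设使用 CPU 执行器):
+
+```python
+import numpy as np
+
+exe = fluid.Executor(fluid.CPUPlace())
+exe.run(fluid.default_startup_program())
+
+# feed 两个 int64 标量,x=3 < y=4,cond 选择 true_block,因此 out 为 [1]
+res, = exe.run(feed={'x': np.array([3]).astype('int64'),
+                     'y': np.array([4]).astype('int64')},
+               fetch_list=[out])
+print(res)  # [1]
+```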
### BlockDesc and ProgramDesc
diff --git a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md
index 9cb2821a4964ca752d3be03631909a6dd6d9431a..1830096f1e53ab544348cbedb107962307773564 100644
--- a/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md
+++ b/doc/fluid/advanced_guide/addon_development/design_idea/fluid_design_idea_en.md
@@ -59,40 +59,41 @@ The blocks contain:
The concept of block is the same with that in generic programs. For example, there are three blocks in the following C++ code:
``` cpp
-int main(){ //block 0
- int i = 0;
- if (i<10){ //block 1
- for (int j=0;j<10;j++){ //block 2
- }
- }
- return 0;
+#include <iostream>
+
+int main() {
+ int x = 5; // block 0
+ int y = 4; // block 0
+ int out; // block 0
+
+ if (x < y) { // block 0
+ out = 1; // block 1
+ } else {
+ out = 0; // block 2
+ }
+
+ std::cout << out << std::endl;
+ return 0;
}
```
Similarly, the following Program contains 3 blocks:
```python
-import paddle.fluid as fluid # block 0
-
-limit = fluid.layers.fill_constant_batch_size_like(
- Input=label, dtype='int64', shape=[1], value=5.0)
-cond = fluid.layers.less_than(x=label, y=limit)
-
-ie = fluid.layers.IfElse(cond)
-with ie.true_block(): # block 1
- true_image = ie.input(image)
- hidden = fluid.layers.fc(input=true_image, size=100, act='tanh')
- prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
- ie.output(prob)
-
-with ie.false_block(): # block 2
- false_image = ie.input(image)
- hidden = fluid.layers.fc(
- input=false_image, size=200, act='tanh')
- prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
- ie.output(prob)
-
-prob = ie()
+import paddle.fluid as fluid
+
+x = fluid.data(name='x', shape=[1], dtype='int64') # block 0
+y = fluid.data(name='y', shape=[1], dtype='int64') # block 0
+
+def true_block():
+ return fluid.layers.fill_constant(dtype='int64', value=1, shape=[1]) # block 1
+
+def false_block():
+ return fluid.layers.fill_constant(dtype='int64', value=0, shape=[1]) # block 2
+
+condition = fluid.layers.less_than(x, y) # block 0
+
+out = fluid.layers.cond(condition, true_block, false_block) # block 0
```
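+
+For illustration only (assuming a CPU executor), the Program above can be run as follows:
+
+```python
+import numpy as np
+
+exe = fluid.Executor(fluid.CPUPlace())
+exe.run(fluid.default_startup_program())
+
+# Feed two int64 scalars; since x=3 < y=4, cond selects true_block and out is [1].
+res, = exe.run(feed={'x': np.array([3]).astype('int64'),
+                     'y': np.array([4]).astype('int64')},
+               fetch_list=[out])
+print(res)  # [1]
+```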
### BlockDesc and ProgramDesc
@@ -229,8 +230,8 @@ import numpy
train_data=numpy.array([[1.0],[2.0],[3.0],[4.0]]).astype('float32')
y_true = numpy.array([[2.0],[4.0],[6.0],[8.0]]).astype('float32')
# Define the network
-x = fluid.layers.data(name="x",shape=[1],dtype='float32')
-y = fluid.layers.data(name="y",shape=[1],dtype='float32')
+x = fluid.data(name="x",shape=[None, 1],dtype='float32')
+y = fluid.data(name="y",shape=[None, 1],dtype='float32')
y_predict = fluid.layers.fc(input=x,size=1,act=None)
#definition loss function
cost = fluid.layers.square_error_cost(input=y_predict,label=y)
@@ -299,7 +300,7 @@ As you can see from the output, the entire definition process is transformed int
BlockDesc contains defined vars and a series of ops. Take input x as an example. In python code, x is 1D data of data type "float 32":
```python
-x = fluid.layers.data(name="x",shape=[1],dtype='float32')
+x = fluid.data(name="x",shape=[None, 1],dtype='float32')
```
In BlockDesc, the variable x is described as:
```
@@ -348,7 +349,7 @@ Since there are multiple columns of incoming and outgoing data, fluid defines tr
```python
# Start training
outs = exe.run(
- feed={'x':train_data,'y':y_true},
+ feed={'x':train_data,'y':y_true},
fetch_list=[y_predict.name,avg_cost.name])
```
The above code defines that train_data is to be passed into the x variable, y_true is to be passed into the y variable, and output the predicted value of y and the last round value of cost.
diff --git a/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md b/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md
index f83220be29aab54716bb71cd144d3359a7e9d9ff..c8bf3b461d5e3be845c174c4e2daeecdd1bc326a 100644
--- a/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md
+++ b/doc/fluid/advanced_guide/addon_development/new_op/custom_op.md
class Relu2GradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
- std::unique_ptr<T> Apply() const override {
- auto* op = new T();
+ void Apply(GradOpPtr<T> op) const override {
op->SetType("relu2_grad");
op->SetInput("Y", this->Output("Y"));
op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
op->SetAttrMap(this->Attrs());
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
- return std::unique_ptr<T>(op);
}
};
@@ -142,7 +140,7 @@ REGISTER_OP_CPU_KERNEL(relu2_grad,
-ReLU OP的GPU实现, ``relu_op.cc`` 文件:
+ReLU OP的GPU实现, ``relu_op.cu`` 文件:
```
// relu_op.cu
@@ -272,8 +270,8 @@ g++ relu_op.cc relu_op.cu.o -o relu2_op.so -shared -fPIC -std=c++11 -O3 -DPADDLE
注意点:
-1. NVCC编译GPU OP的cu文件时,需要加 `-DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO` 。
-2. 如果安装的PaddlePaddle不包含MKLDNN,则需要去掉编译选项`-DPADDLE_WITH_MKLDNN`。默认的安装包已包含MKLDNN。
+1. 通过NVCC编译CUDA源文件时,需要加编译选项 `-DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO`,在框架源码中会使用这些宏定义进行条件编译。用户自定义的C++ OP实现编译时,选项的开启状态需要和核心框架编译行为一致。如`EIGEN_USE_GPU`是使用Eigen数学库的GPU实现时需要增加的编译选项。
+2. 如果飞桨安装包中不包含MKLDNN库,则需要去掉编译选项`-DPADDLE_WITH_MKLDNN`。核心框架源码中(比如tensor.h)有使用此宏定义进行条件编译,该选项是否打开同样需要和核心框架编译行为保持一致。默认的飞桨安装包中含有MKLDNN库。
3. 可多个OP编译到同一个动态库中。
4. 通过pip方式安装的PaddlePaddle由GCC 4.8编译得到,由于GCC 4.8和GCC 5以上**C++11 ABI不兼容**,您编写的自定义OP,需要通过GCC 4.8编译。若是GCC 5及以上的环境上使用自定义OP,推荐使用[Docker安装PaddlePaddle](https://www.paddlepaddle.org.cn/install/doc/docker),使得编Paddle和编译自定义OP的GCC版本相同。
@@ -333,6 +331,11 @@ np.allclose(out, np.maximum(x,0.))
## FAQ
-1. Q:如果出现类似错误: cannot open shared object file: No such file or directory.
+1. Q: 如果出现类似错误: `relu2_op.so: cannot open shared object file: No such file or directory` 以及 `libpaddle_framework.so: cannot open shared object file: No such file or directory`。
- A: 需要设置动态库的路径到环境变量LD_LIBRARY_PATH中。
+ A: 需要将`relu2_op.so`所在路径以及`libpaddle_framework.so`路径(即`paddle.sysconfig.get_lib()`得到路径)设置到环境变量LD_LIBRARY_PATH中:
+
+ ```
+ # 假如relu2_op.so路径是:`paddle/test`,对于Linux环境设置:
+ export LD_LIBRARY_PATH=paddle/test:$( python -c 'import paddle; print(paddle.sysconfig.get_lib())'):$LD_LIBRARY_PATH
+ ```
diff --git a/doc/fluid/advanced_guide/addon_development/new_op/new_op.md b/doc/fluid/advanced_guide/addon_development/new_op/new_op.md
index cf1a72ddfa9c6980827b14691383254c3bfa8e85..7b22163538445e9929ff3e2684c0efd41a536ec3 100644
--- a/doc/fluid/advanced_guide/addon_development/new_op/new_op.md
+++ b/doc/fluid/advanced_guide/addon_development/new_op/new_op.md
@@ -61,6 +61,9 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "(Tensor), The first input tensor of mul op.");
AddInput("Y", "(Tensor), The second input tensor of mul op.");
AddOutput("Out", "(Tensor), The output tensor of mul op.");
+ AddAttr("use_mkldnn",
+ "(bool, default false) Only used in mkldnn kernel")
+ .SetDefault(false);
AddAttr<int>(
"x_num_col_dims",
R"DOC((int, default 1), The mul_op can take tensors with more than two
@@ -91,18 +94,34 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
+ AddAttr<float>(
+ "scale_x",
+ "scale_x to be used for int8 mul input data x. scale_x has the"
+ "same purpose as scale_in in OPs that support quantization."
+ "Only to be used with MKL-DNN INT8")
+ .SetDefault(1.0f);
+ AddAttr<std::vector<float>>(
+ "scale_y",
+ "scale_y to be used for int8 mul input data y. scale_y has the"
+ "same purpose as scale_weights in OPs that support quantization."
+ "Only to be used with MKL-DNN INT8")
+ .SetDefault({1.0f});
+ AddAttr("scale_out",
+ "scale_out to be used for int8 output data."
+ "Only used with MKL-DNN INT8")
+ .SetDefault(1.0f);
+ AddAttr<bool>(
+ "force_fp32_output",
+ "(bool, default false) Force quantize kernel output FP32, only "
+ "used in quantized MKL-DNN.")
+ .SetDefault(false);
AddComment(R"DOC(
Mul Operator.
-
This operator is used to perform matrix multiplication for input $X$ and $Y$.
-
The equation is:
-
$$Out = X * Y$$
-
Both the input $X$ and $Y$ can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD information with input $X$.
-
)DOC");
}
};
@@ -112,34 +131,34 @@ or not. But the output only shares the LoD information with input $X$.
开发者通过覆盖`framework::OpProtoAndCheckerMaker`中的`Make`函数来定义Op所对应的Proto,通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddAttr`添加属性参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。
-上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。
+上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,以及`use_mkldnn`等属性,并解释了各自含义,命名请遵守[命名规范](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/dev/name_convention.md)。
-### 定义GradProtoMaker类
-通常情况下,每个Op的会有一个对应的`GradProtoMaker`,为方便代码编写,fluid提供了默认的`GradProtoMaker`,即:`DefaultGradProtoMaker`。`DefaultGradProtoMaker`会使用前向Op的全部输入(`Input`)输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op的输入变量所对应的的梯度(`Input@Grad`)作为输出。
+### 定义GradOpMaker类
+通常情况下,大部分Op只有一个对应的反向Op,每个Op会有一个对应的`GradOpMaker`。为方便代码编写,fluid为这种情况提供了一个模板类[`SingleGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L188)。`MulOp`的`GradOpMaker`需要继承这个模板类,并在`Apply()`方法中设置反向Op的输入、输出和属性。此外,fluid还提供了一个默认的`GradOpMaker`,
+[`DefaultGradOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/grad_op_desc_maker.h#L227),该模板类会使用前向Op的全部输入(`Input`)输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op的输入变量所对应的的梯度(`Input@Grad`)作为输出。
**注意:**
不要将反向Op不会用到的变量放到反向Op的输入列表中,这样会导致这些不会被反向Op用到的变量的空间不能够及时回收,进而有可能导致用到该Op的模型可以设置的batch_size较低。
-比如`relu`操作的前向操作为:`out.device(d) = x.cwiseMax(static_cast(0));`反向操作为:`dx.device(d) = dout * (out > static_cast(0)).template cast();`。显然,反向操作中只是用到了`out`、`dout`、`dx`,没有用到`x`。
+比如`relu`操作的前向操作为:`out.device(d) = x.cwiseMax(static_cast<T>(0));`反向操作为:`dx.device(d) = dout * (out > static_cast<T>(0)).template cast<T>();`。显然,反向操作中只是用到了`out`、`dout`、`dx`,没有用到`x`。因此,通常不建议使用默认的`DefaultGradOpMaker`。
-下面示例定义了`MulOp`的GradProtoMaker。
+下面示例定义了`MulOp`的`GradOpMaker`。
```cpp
-class MulOpGradMaker : public framework::SingleGradOpDescMaker {
+template <typename T>
+class MulOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
- using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+ using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
- std::unique_ptr<framework::OpDesc> Apply() const override {
- std::unique_ptr<framework::OpDesc> retv(new framework::OpDesc());
+ void Apply(GradOpPtr<T> retv) const override {
retv->SetType("mul_grad");
- retv->SetInput("X", Input("X"));
- retv->SetInput("Y", Input("Y"));
- retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
- retv->SetOutput(framework::GradVarName("X"), InputGrad("X"));
- retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
- retv->SetAttrMap(Attrs());
- return retv;
+ retv->SetInput("X", this->Input("X"));
+ retv->SetInput("Y", this->Input("Y"));
+ retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
+ retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
+ retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
+ retv->SetAttrMap(this->Attrs());
}
};
```
@@ -148,7 +167,8 @@ class MulOpGradMaker : public framework::SingleGradOpDescMaker {
- 有些Op的前向逻辑和反向逻辑是一样的,比如[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/scale_op.cc).这种情况下,前向Op和反向Op的Kernel可以为同一个。
- 有些前向Op所对应的反向Op可能有多个,比如[`SumOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/sum_op.cc),这种情况下,`GradMaker`需要继承`framework::GradOpDescMakerBase`。
-- 有些Op的反向对应另一个Op的前向,比如[`SplitOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h),这种情况下,[`SplitGradMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h#L52)中定义的`SplitOp`反向Op的Type就是`concat`,
+- 有些Op的反向对应另一个Op的前向,比如[`SplitOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h),这种情况下,[`SplitGradMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/split_op.h#L157)中定义的`SplitOp`反向Op的Type就是`concat`,
+- 为高效地同时支持命令式编程模式(动态图)和声明式编程模式(静态图),`SingleGradOpMaker`是一个模板类,在注册Operator时需要同时注册`MulOpGradMaker<OpDesc>`(声明式编程模式使用)和`MulOpGradMaker<OpBase>`(命令式编程模式使用)。
### 定义Operator类
@@ -159,12 +179,16 @@ class MulOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
- protected:
void InferShape(framework::InferShapeContext* ctx) const override {
- PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null.");
- PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null.");
- PADDLE_ENFORCE(ctx->HasOutput("Out"),
- "Output(Out) of MulOp should not be null.");
+ PADDLE_ENFORCE_EQ(
+ ctx->HasInput("X"), true,
+ platform::errors::NotFound("Input(X) of MulOp should not be null."));
+ PADDLE_ENFORCE_EQ(
+ ctx->HasInput("Y"), true,
+ platform::errors::NotFound("Input(Y) of MulOp should not be null."));
+ PADDLE_ENFORCE_EQ(
+ ctx->HasOutput("Out"), true,
+ platform::errors::NotFound("Output(Out) of MulOp should not be null."));
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
@@ -176,23 +200,42 @@ class MulOp : public framework::OperatorWithKernel {
<< " x_num_col_dims=" << x_num_col_dims
<< " y_num_col_dims=" << y_num_col_dims;
+ PADDLE_ENFORCE_NE(framework::product(y_dims), 0,
+ platform::errors::PreconditionNotMet(
+ "The Input variable Y(%s) has not "
+ "been initialized. You may need to confirm "
+ "if you put exe.run(startup_program) "
+ "after optimizer.minimize function.",
+ ctx->Inputs("Y").front()));
PADDLE_ENFORCE_GT(
x_dims.size(), x_num_col_dims,
- "The input tensor X's rank of MulOp should be larger than "
- "x_num_col_dims.");
+ platform::errors::InvalidArgument(
+ "The input tensor X's dimensions of MulOp "
+ "should be larger than x_num_col_dims. But received X's "
+ "dimensions = %d, X's shape = [%s], x_num_col_dims = %d.",
+ x_dims.size(), x_dims, x_num_col_dims));
PADDLE_ENFORCE_GT(
y_dims.size(), y_num_col_dims,
- "The input tensor Y's rank of MulOp should be larger than "
- "y_num_col_dims: %ld vs %ld",
- y_dims.size(), y_num_col_dims);
+ platform::errors::InvalidArgument(
+ "The input tensor Y's dimensions of MulOp "
+ "should be larger than y_num_col_dims. But received Y's "
+ "dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.",
+ y_dims.size(), y_dims, y_num_col_dims));
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
- PADDLE_ENFORCE_EQ(x_mat_dims[1], y_mat_dims[0],
- "First matrix's width must be equal with second matrix's "
- "height. %s, %s",
- x_mat_dims[1], y_mat_dims[0]);
+ PADDLE_ENFORCE_EQ(
+ x_mat_dims[1], y_mat_dims[0],
+ platform::errors::InvalidArgument(
+ "After flatten the input tensor X and Y to 2-D dimensions "
+ "matrix X1 and Y1, the matrix X1's width must be equal with matrix "
+ "Y1's height. But received X's shape = [%s], X1's shape = [%s], "
+ "X1's "
+ "width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = "
+ "%s.",
+ x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims,
+ y_mat_dims[0]));
std::vector<int64_t> output_dims;
output_dims.reserve(
static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims));
@@ -208,10 +251,34 @@ class MulOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("Out", framework::make_ddim(output_dims));
ctx->ShareLoD("X", /*->*/ "Out");
}
+
+ framework::OpKernelType GetExpectedKernelType(
+ const framework::ExecutionContext& ctx) const {
+ framework::LibraryType library = framework::LibraryType::kPlain;
+ framework::DataLayout layout = framework::DataLayout::kAnyLayout;
+ int customized_type_value =
+ framework::OpKernelType::kDefaultCustomizedTypeValue;
+ auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
+#ifdef PADDLE_WITH_MKLDNN
+ if (library == framework::LibraryType::kPlain &&
+ platform::CanMKLDNNBeUsed(ctx)) {
+ library = framework::LibraryType::kMKLDNN;
+ layout = framework::DataLayout::kMKLDNN;
+
+ if (input_data_type == framework::DataTypeTrait<int8_t>::DataType() ||
+ input_data_type == framework::DataTypeTrait<uint8_t>::DataType()) {
+ customized_type_value = kMULMKLDNNINT8;
+ }
+ }
+#endif
+
+ return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
+ library, customized_type_value);
+ }
};
```
-[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L22)继承自`OperatorWithKernel`。`public`成员:
+[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L30)继承自`OperatorWithKernel`。`public`成员:
```cpp
using framework::OperatorWithKernel::OperatorWithKernel;
@@ -226,15 +293,17 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
: OperatorWithKernel(type, inputs, outputs, attrs) {}
```
-还需要重写`InferShape`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`framework::InferShapeContext* ctx`,通过该参数可获取到输入输出以及属性。它的功能是:
+此外,Operator类通常需要重写`InferShape`接口,并在有必要时重写`GetExpectedKernelType`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`framework::InferShapeContext* ctx`,通过该参数可获取到输入输出以及属性。它的功能是:
- 做检查, 尽早报错:检查输入数据维度、类型等是否合法。
- 设置输出Tensor的形状以及LoD信息。
+`GetExpectedKernelType`接口是OperatorWithKernel类中用于获取指定设备(例如CPU,GPU)上指定数据类型(例如double,float)的OpKernel的方法。该方法如何重写请参考[写C++ OP相关注意事项](op_notes.html#getexpectedkerneltype)。
+
通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中
### InferShape区分 compile time 和 run time
-在我们的静态图网络中,`InferShape`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferShape就需要区分compile time 和 run time。
+在我们的声明式编程模式网络中,`InferShape`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferShape就需要区分compile time 和 run time。
以下两种情况需要区分compile time和 run time。
@@ -286,7 +355,7 @@ y_dim[i] = x_dim[i] + z_dim[i]
- 运算: -1和其他数做任何运算都要等于-1
**参考代码**
-1. 判断的实现方法可以参考cross_entropy_op.cc,cross_entropy_op 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致
+1. 判断的实现方法可以参考[cross_entropy_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/cross_entropy_op.cc#L39),cross_entropy_op 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致
```cpp
bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
@@ -300,31 +369,35 @@ y_dim[i] = x_dim[i] + z_dim[i]
}
```
-2. 运算的实现可以参考concat_op.cc,concat在InferShape判断时,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。
+2. 运算的实现可以参考[concat_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/concat_op.cc#L59),concat在InferShape判断时,调用`ComputeAndCheckShape`,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。
```cpp
- auto out_dims = ins[0];
+ const size_t n = inputs_dims.size();
+ auto out_dims = inputs_dims[0];
size_t in_zero_dims_size = out_dims.size();
for (size_t i = 1; i < n; i++) {
for (size_t j = 0; j < in_zero_dims_size; j++) {
if (j == axis) {
- if (ctx->IsRuntime()) {
- out_dims[axis] += ins[i][j];
+ if (is_runtime) {
+ out_dims[axis] += inputs_dims[i][j];
} else {
- if (ins[i][j] == -1) {
+ if (inputs_dims[i][j] == -1) {
out_dims[axis] = -1;
} else {
- out_dims[axis] += ins[i][j];
+ out_dims[axis] += inputs_dims[i][j];
}
}
} else {
bool check_shape =
- ctx->IsRuntime() || (out_dims[j] > 0 && ins[i][j] > 0);
+ is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0);
if (check_shape) {
// check all shape in run time
- PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j],
- "Input tensors should have the same "
- "elements except the specify axis.");
+ PADDLE_ENFORCE_EQ(
+ inputs_dims[0][j], inputs_dims[i][j],
+ "ShapeError: Dimension %d in inputs' shapes must be equal. "
+ "But recevied input[0]'s shape = "
+ "[%s], input[%d]'s shape = [%s].",
+ j, inputs_dims[0], i, inputs_dims[i]);
}
}
}
@@ -332,7 +405,6 @@ y_dim[i] = x_dim[i] + z_dim[i]
```
-
### 定义OpKernel类
`MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数:
@@ -405,9 +477,12 @@ class MulKernel : public framework::OpKernel {
```cpp
namespace ops = paddle::operators;
- REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker,
- ops::MulOpGradMaker)
- REGISTER_OPERATOR(mul_grad, ops::MulGradOp)
+ REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpInferVarType,
+ ops::MulOpGradMaker<paddle::framework::OpDesc>,
+ ops::MulOpGradMaker<paddle::imperative::OpBase>);
+
+ REGISTER_OPERATOR(mul_grad, ops::MulGradOp);
+
REGISTER_OP_CPU_KERNEL(mul,
ops::MulKernel,
ops::MulKernel);
@@ -416,11 +491,7 @@ class MulKernel : public framework::OpKernel {
ops::MulGradKernel);
```
- 在上面的代码中:
-
- - `REGISTER_OPERATOR` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。
-
- - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。
+ 在上面的代码中,使用`REGISTER_OPERATOR`注册了`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,其`GradOpMaker`分别是`ops::MulOpGradMaker<paddle::framework::OpDesc>`(声明式编程模式使用)和`ops::MulOpGradMaker<paddle::imperative::OpBase>`(命令式编程模式使用),并使用`REGISTER_OPERATOR`注册`ops::MulGradOp`,类型名为`mul_grad`。然后,使用`REGISTER_OP_CPU_KERNEL`注册了`ops::MulKernel`类,并特化模板参数为设备为`paddle::platform::CPUPlace`、数据类型为`float`类型和`double`类型;同理,注册`ops::MulGradKernel`类。
- 在 `.cu`文件中注册CUDA Kernel。
@@ -442,27 +513,8 @@ class MulKernel : public framework::OpKernel {
**注意:**
-在运行Op时,框架系统会根据输入数据所在的设备、输入数据的类型等信息自动的选择合适的OpKernel,比如输入的数据是在GPU上,并且为`float`类型,框架系统会选择由`REGISTER_OP_CUDA_KERNEL`注册的`ops::MulKernel`。如果用户希望指定运行时可被调用的OpKernel,用户需要覆盖`framework::OperatorWithKernel`中的`GetExpectedKernelType`函数,比如`ConvOp`会根据属性`use_cudnn`为`false`还是为`true`决定是否调用cudnn库中提供的conv操作。
+在运行Op时,框架系统会根据输入数据所在的设备、输入数据的类型等信息自动的选择合适的OpKernel,比如输入的数据是在GPU上,并且为`float`类型,框架系统会选择由`REGISTER_OP_CUDA_KERNEL`注册的`ops::MulKernel`。如果用户希望指定运行时可被调用的OpKernel,用户需要覆盖`framework::OperatorWithKernel`中的`GetExpectedKernelType`函数,比如`MulOp`会根据属性`use_mkldnn`为`false`还是为`true`决定是否调用mkldnn库来完成计算。
-```
-framework::OpKernelType ConvOp::GetExpectedKernelType(
- const framework::ExecutionContext& ctx) const {
- int customized_type_value =
- framework::OpKernelType::kDefaultCustomizedTypeValue;
- framework::LibraryType library{framework::LibraryType::kPlain};
- auto input_data_type = ctx.Input("Input")->type();
- std::string data_format = ctx.Attr("data_format");
- framework::DataLayout layout = framework::StringToDataLayout(data_format);
-#ifdef PADDLE_WITH_CUDA
- if (ctx.Attr("use_cudnn")) {
- library = framework::LibraryType::kCUDNN;
- }
-#endif
- auto type = framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
- library, customized_type_value);
- return type;
-}
-```
### 编译
diff --git a/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md b/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md
index 69a42f0096a936b9ff718f8b73dbb5c18f0cd9dd..ddae81c39873944f11f3ee227ebd51e785943df6 100644
--- a/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md
+++ b/doc/fluid/advanced_guide/addon_development/new_op/op_notes.md
@@ -157,13 +157,31 @@ ShareDataWith的功能是使两个Tensor共享底层buffer,在调用这个操
目前稀疏梯度在做更新的时候会先对梯度做merge,即对相同参数的梯度做累加,然后做参数以及附加参数(如velocity)的更新。
### 8.显存优化
+
+#### 8.1 为可原位计算的Op注册Inplace
+有些Op的计算逻辑中,输出可以复用输入的显存空间,也可称为原位计算。例如[`reshape_op`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/reshape_op.cc)中,输出`Out`可以复用输入`X`的显存空间,因为该Op的计算逻辑不会改变`X`的实际数据,只是修改它的shape,输出和输入复用同一块显存空间不影响结果。对于这类OP,可以注册`Inplace`,从而让框架在运行时自动地进行显存优化。
+
+fluid提供了`DECLARE_INPLACE_OP_INFERER`宏用于注册`Inplace`,该宏第一个参数是一个类名,如`ReshapeOpInplaceInToOut`;第二个参数是一对复用的输入输出,以`{"X", "Out"}`的形式给出。在`REGISTER_OPERATOR`时,
+可以将类名传入,从而为该Op注册`Inplace`。
+
+```
+DECLARE_INPLACE_OP_INFERER(ReshapeOpInplaceInToOut, {"X", "Out"});
+
+REGISTER_OPERATOR(
+ reshape, ops::ReshapeOp, ops::ReshapeOpMaker,
+ paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
+ paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
+ ops::ReshapeOpInplaceInToOut);
+```
+
+#### 8.2 减少OP中的无关变量
通常反向Op会依赖于前向Op的某些输入(Input)、输出(Output),以供反向Op计算使用。但有些情况下,反向Op不需要前向Op的所有输入和输出;有些情况下,反向Op只需要前向Op的部分输入和输出;有些情况下,反向Op只需要使用前向Op中输入和输出变量的Shape和LoD信息。若Op开发者在注册反向Op时,将不必要的前向Op输入和输出作为反向Op的输入,会导致这部分显存无法被框架现有的显存优化策略优化,从而导致模型显存占用过高。
所以在写注册反向Op时需要注意以下几点:
-- Fluid提供的`DefaultGradOpDescMaker`,默认会将前向op的所有输入(`Input`)、输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op输入所对应的梯度(`Input@Grad`)作为反向Op的输出。所以在使用`DefaultGradOpDescMaker`时需要考虑是否有些变量在计算中不被用到。
-- 如果`DefaultGradOpDescMaker`不能够满足需求,需要用户自己手动构建`GradOpDescMaker`,具体实现请参考[相关文档](new_op.html#permalink-4--gradprotomaker-);
-- 如果有些反向Op需要依赖前向Op的输入或输出变量的的Shape或LoD,但不依赖于变量中Tensor的Buffer,且不能根据其他变量推断出该Shape和LoD,需要对该变量(以下称该变量为`X`)在反向Op中进行注册`NoNeedBufferVarsInference`。**一旦注册了`NoNeedBufferVarsIference`,反向op中就不能读写该变量对应的Tensor中的buffer,只能调用Tensor的dims()和lod()方法,同时,反向Op中的`GetExpectedKernelType()`必须要重写,并且`GetExpectedKernelType()`中不能访问`X`变量中Tensor的type()方法**。比如在`SliceOpGrad`中只会用到`Input`中变量的Shape信息,所以需要为对`Input`在`SliceOpGrad`上进行注册:
+- Fluid提供的`DefaultGradOpMaker`,默认会将前向op的所有输入(`Input`)、输出(`Output`)以及输出变量所对应的梯度(`Output@Grad`)作为反向Op的输入,将前向Op输入所对应的梯度(`Input@Grad`)作为反向Op的输出。所以在使用`DefaultGradOpMaker`时需要考虑是否有些变量在计算中不被用到。
+- 如果`DefaultGradOpMaker`不能够满足需求,需要用户自己手动构建`GradOpMaker`,具体实现请参考[相关文档](new_op.html#gradopmaker);
+- 如果有些反向Op需要依赖前向Op的输入或输出变量的Shape或LoD,但不依赖于变量中Tensor的Buffer,且不能根据其他变量推断出该Shape和LoD,则可以通过`DECLARE_NO_NEED_BUFFER_VARS_INFERER`接口对该变量(以下称该变量为`X`)在反向Op中注册`NoNeedBufferVars`。**一旦注册了`NoNeedBufferVars`,反向op中就不能读写该变量对应的Tensor中的buffer,只能调用Tensor的dims()和lod()方法,同时,反向Op中的`GetExpectedKernelType()`必须要重写,并且`GetExpectedKernelType()`中不能访问`X`变量中Tensor的type()方法**。比如在`SliceOpGrad`中只会用到`Input`中变量的Shape信息,所以需要对`Input`在`SliceOpGrad`上进行注册:
```
namespace paddle {
namespace operators {
@@ -185,30 +203,44 @@ class SliceOpGrad : public framework::OperatorWithKernel {
};
-class SliceOpGradMaker : public framework::SingleGradOpDescMaker {
+template <typename T>
+class SliceOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
- using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+ using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
- std::unique_ptr<framework::OpDesc> Apply() const override {
- auto* bind = new framework::OpDesc();
- bind->SetInput("Input", Input("Input"));
- bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
- bind->SetOutput(framework::GradVarName("Input"), InputGrad("Input"));
- bind->SetAttrMap(Attrs());
+ void Apply(GradOpPtr<T> bind) const override {
+ bind->SetInput("Input", this->Input("Input"));
+ if (this->HasInput("StartsTensor")) {
+ bind->SetInput("StartsTensor", this->Input("StartsTensor"));
+ }
+ if (this->HasInput("EndsTensor")) {
+ bind->SetInput("EndsTensor", this->Input("EndsTensor"));
+ }
+ if (this->HasInput("StartsTensorList")) {
+ bind->SetInput("StartsTensorList", this->Input("StartsTensorList"));
+ }
+ if (this->HasInput("EndsTensorList")) {
+ bind->SetInput("EndsTensorList", this->Input("EndsTensorList"));
+ }
+ bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
+ bind->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
+ bind->SetAttrMap(this->Attrs());
bind->SetType("slice_grad");
- return std::unique_ptr<framework::OpDesc>(bind);
}
};
-DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(SliceOpGradNoNeedBufferVarsInference,
- "Input");
+DECLARE_NO_NEED_BUFFER_VARS_INFERER(SliceOpGradNoNeedBufferVarsInference,
+ "Input");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker,
- ops::SliceOpGradMaker);
+ ops::SliceOpGradMaker<paddle::framework::OpDesc>,
+ ops::SliceOpGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(slice_grad, ops::SliceOpGrad,
+ ops::SliceDoubleOpGradMaker<paddle::framework::OpDesc>,
+ ops::SliceDoubleOpGradMaker<paddle::imperative::OpBase>,
ops::SliceOpGradNoNeedBufferVarsInference);
```
diff --git a/doc/fluid/advanced_guide/data_preparing/feeding_data.rst b/doc/fluid/advanced_guide/data_preparing/feeding_data.rst
index 6fcf529f7174af29623b567bec2c248da174db01..c0749a418e1e380e748d0c1e4cfc0993a0b73d04 100644
--- a/doc/fluid/advanced_guide/data_preparing/feeding_data.rst
+++ b/doc/fluid/advanced_guide/data_preparing/feeding_data.rst
@@ -4,7 +4,7 @@
同步数据读取
##############
-PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层;
+PaddlePaddle Fluid支持使用 :code:`fluid.data()` 配置数据层;
再使用 Numpy Array 或者直接使用Python创建C++的
:code:`fluid.LoDTensor` , 通过 :code:`Executor.run(feed=...)` 传给
:code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor` 。
@@ -12,29 +12,25 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层;
数据层配置
##########
-通过 :code:`fluid.layers.data()` 可以配置神经网络中需要的数据层。具体方法为:
+通过 :code:`fluid.data()` 可以配置神经网络中需要的数据层。具体方法为:
.. code-block:: python
import paddle.fluid as fluid
- image = fluid.layers.data(name="image", shape=[3, 224, 224])
- label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+ image = fluid.data(name="image", shape=[None, 3, 224, 224])
+ label = fluid.data(name="label", shape=[None, 1], dtype="int64")
# use image/label as layer input
prediction = fluid.layers.fc(input=image, size=1000, act="softmax")
loss = fluid.layers.cross_entropy(input=prediction, label=label)
...
-上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.layers.data`
-创建的两个输入数据层。其中 :code:`image` 是 :code:`[3, 224, 224]` 维度的浮点数据;
-:code:`label` 是 :code:`[1]` 维度的整数数据。这里需要注意的是:
+上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.data`
+创建的两个输入数据层。其中 :code:`image` 是 :code:`[None, 3, 224, 224]` 维度的浮点数据;
+:code:`label` 是 :code:`[None, 1]` 维度的整数数据。这里需要注意的是:
-1. Fluid中默认使用 :code:`-1` 表示 batch size 维度,默认情况下会在 :code:`shape`
- 的第一个维度添加 :code:`-1` 。 所以 上段代码中, 我们可以接受将一个
- :code:`[32, 3, 224, 224]` 的numpy array传给 :code:`image` 。 如果想自定义batch size
- 维度的位置的话,请设置 :code:`fluid.layers.data(append_batch_size=False)` 。
- 请参考进阶使用中的 :ref:`user_guide_customize_batch_size_rank` 。
+1. Executor在执行的时候,会检查feed的数据与定义的数据层的 :code:`shape` 和 :code:`dtype` 是否一致,如果不一致,程序会报错退出。对于一些任务,数据的某些维度会随训练轮数变化,可以将该维度的值设置为None,例如第0维会变化时,可以将 :code:`shape` 设置为 :code:`[None, 3, 224, 224]` (可参考本列表之后的示例)。
2. Fluid中用来做类别标签的数据类型是 :code:`int64`,并且标签从0开始。可用数据类型请参考 :ref:`user_guide_paddle_support_data_types`。
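+
+下面给出一个简化示例(仅作示意,网络结构沿用上文的 :code:`image` / :code:`label` 配置),展示第0维设置为None时,可以feed不同batch size的数据:
+
+.. code-block:: python
+
+    import numpy
+    import paddle.fluid as fluid
+
+    image = fluid.data(name="image", shape=[None, 3, 224, 224], dtype="float32")
+    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+    prediction = fluid.layers.fc(input=image, size=10, act="softmax")
+    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(fluid.default_startup_program())
+
+    # 第0维是None,因此每个batch的样本数可以不同,例如先feed 8个样本,再feed 32个样本
+    for batch_size in [8, 32]:
+        feed_image = numpy.random.random([batch_size, 3, 224, 224]).astype("float32")
+        feed_label = numpy.random.randint(0, 10, [batch_size, 1]).astype("int64")
+        loss_val, = exe.run(fluid.default_main_program(),
+                            feed={"image": feed_image, "label": feed_label},
+                            fetch_list=[loss])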
@@ -69,17 +65,17 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层;
序列数据是PaddlePaddle Fluid支持的特殊数据类型,可以使用 :code:`LoDTensor` 作为
输入数据类型。它需要用户: 1. 传入一个mini-batch需要被训练的所有数据;
2.每个序列的长度信息。
-用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor`。
+用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor` 。
-传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level`。
-例如训练数据是词汇组成的句子,:code:`lod_level=1`;训练数据是 词汇先组成了句子,
-句子再组成了段落,那么 :code:`lod_level=2`。
+传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level` 。
+例如训练数据是词汇组成的句子,:code:`lod_level=1` ;训练数据是 词汇先组成了句子,
+句子再组成了段落,那么 :code:`lod_level=2` 。
例如:
.. code-block:: python
- sentence = fluid.layers.data(name="sentence", dtype="int64", shape=[1], lod_level=1)
+ sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1)
...
@@ -91,8 +87,8 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层;
)
})
-训练数据 :code:`sentence` 包含三个样本,他们的长度分别是 :code:`4, 1, 2`。
-他们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]`。
+训练数据 :code:`sentence` 包含三个样本,他们的长度分别是 :code:`4, 1, 2` 。
+他们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]` 。
如何分别设置ParallelExecutor中每个设备的训练数据
------------------------------------------------
@@ -123,36 +119,6 @@ PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层;
上述代码中,GPU0会训练 32 个样本,而 GPU1训练 16 个样本。
-.. _user_guide_customize_batch_size_rank:
-
-自定义BatchSize维度
--------------------
-
-PaddlePaddle Fluid默认batch size是数据的第一维度,以 :code:`-1` 表示。但是在高级
-使用中,batch_size 可以固定,也可以是其他维度或者多个维度来表示。这都需要设置
-:code:`fluid.layers.data(append_batch_size=False)` 来完成。
-
-1. 固定batch size维度
-
- .. code-block:: python
-
- image = fluid.layers.data(name="image", shape=[32, 784], append_batch_size=False)
-
- 这里,:code:`image` 永远是一个 :code:`[32, 784]` 大小的矩阵。
-
-2. 使用其他维度表示batch size
-
- .. code-block:: python
-
- sentence = fluid.layers.data(name="sentence",
- shape=[80, -1, 1],
- append_batch_size=False,
- dtype="int64")
-
- 这里 :code:`sentence` 的中间维度是batch size。这种数据排布会用在定长的循环神经
- 网络中。
-
-
.. _user_guide_paddle_support_data_types:
Fluid目前支持的数据类型
diff --git a/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst b/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst
index 09367520e77af892fd2ac4e8ce2d23541d15bcf3..9afedfa1082232d4a972343a9dbf8df88af8ee8e 100644
--- a/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst
+++ b/doc/fluid/advanced_guide/data_preparing/feeding_data_en.rst
@@ -4,7 +4,7 @@
Take Numpy Array as Training Data
#################################
-PaddlePaddle Fluid supports configuring data layer with :code:`fluid.layers.data()` .
+PaddlePaddle Fluid supports configuring data layer with :code:`fluid.data()` .
Then you can use Numpy Array or directly use Python to create C++
:code:`fluid.LoDTensor` , and then feed it to :code:`fluid.Executor` or :code:`fluid.ParallelExecutor`
through :code:`Executor.run(feed=...)` .
@@ -12,23 +12,23 @@ through :code:`Executor.run(feed=...)` .
Configure Data Layer
############################
-With :code:`fluid.layers.data()` , you can configure data layer in neural network. Details are as follows:
+With :code:`fluid.data()` , you can configure data layer in neural network. Details are as follows:
.. code-block:: python
import paddle.fluid as fluid
- image = fluid.layers.data(name="image", shape=[3, 224, 224])
- label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+ image = fluid.data(name="image", shape=[None, 3, 224, 224])
+ label = fluid.data(name="label", shape=[None, 1], dtype="int64")
# use image/label as layer input
prediction = fluid.layers.fc(input=image, size=1000, act="softmax")
loss = fluid.layers.cross_entropy(input=prediction, label=label)
...
-In the code above, :code:`image` and :code:`label` are two input data layers created by :code:`fluid.layers.data` . :code:`image` is float data of shape :code:`[3, 224, 224]` ; :code:`label` is the int data of shape :code:`[1]` . Note that:
+In the code above, :code:`image` and :code:`label` are two input data layers created by :code:`fluid.data` . :code:`image` is float data of shape :code:`[None, 3, 224, 224]` ; :code:`label` is the int data of shape :code:`[None, 1]` . Note that:
-1. :code:`-1` is represented for the dimension of batch size by default in Fluid. And :code:`-1` is added to the first dimension of :code:`shape` by default. Therefore in the code above, it would be alright to transfer numpy array of :code:`[32, 3, 224, 224]` to :code:`image` . If you want to customize the position of the batch size dimension, please set :code:`fluid.layers.data(append_batch_size=False)` .Please refer to the tutorial in the advanced user guide: :ref:`user_guide_customize_batch_size_rank_en` .
+1. When the program is executed, the executor checks whether the :code:`shape` and :code:`dtype` of the fed data are consistent with those of the defined data layer. If they are not consistent, the program exits with an error. In some tasks, certain dimensions change between training steps; in this case the value of that dimension can be set to None. For example, the :code:`shape` can be set to :code:`[None, 3, 224, 224]` when the 0th dimension changes (see the sketch after this list).
2. Data type of category labels in Fluid is :code:`int64` and the label starts from 0. About the supported data types,please refer to :ref:`user_guide_paddle_support_data_types_en` .
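+
+Below is a minimal sketch (for illustration only, reusing the :code:`image` / :code:`label` configuration above) showing that, with the 0th dimension set to None, batches of different sizes can be fed:
+
+.. code-block:: python
+
+    import numpy
+    import paddle.fluid as fluid
+
+    image = fluid.data(name="image", shape=[None, 3, 224, 224], dtype="float32")
+    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+    prediction = fluid.layers.fc(input=image, size=10, act="softmax")
+    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(fluid.default_startup_program())
+
+    # The 0th dimension is None, so each batch may have a different number of samples,
+    # e.g. feed 8 samples first and 32 samples afterwards.
+    for batch_size in [8, 32]:
+        feed_image = numpy.random.random([batch_size, 3, 224, 224]).astype("float32")
+        feed_label = numpy.random.randint(0, 10, [batch_size, 1]).astype("int64")
+        loss_val, = exe.run(fluid.default_main_program(),
+                            feed={"image": feed_image, "label": feed_label},
+                            fetch_list=[loss])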
@@ -76,7 +76,7 @@ For example:
.. code-block:: python
- sentence = fluid.layers.data(name="sentence", dtype="int64", shape=[1], lod_level=1)
+ sentence = fluid.data(name="sentence", dtype="int64", shape=[None, 1], lod_level=1)
...
@@ -122,32 +122,6 @@ For example:
In the code above, GPU0 will train 32 samples and GPU1 will train 16 samples.
-.. _user_guide_customize_batch_size_rank_en:
-
-Customize the BatchSize dimension
-------------------------------------
-
-Batch size is the first dimension of data by default in PaddlePaddle Fluid, indicated by :code:`-1` .But in advanced usage, batch_size could be fixed or respresented by other dimension or multiple dimensions, which could be implemented by setting :code:`fluid.layers.data(append_batch_size=False)` .
-
-1. fixed BatchSize dimension
-
- .. code-block:: python
-
- image = fluid.layers.data(name="image", shape=[32, 784], append_batch_size=False)
-
- Here :code:`image` is always a matrix with size of :code:`[32, 784]` .
-
-2. batch size expressed by other dimension
-
- .. code-block:: python
-
- sentence = fluid.layers.data(name="sentence",
- shape=[80, -1, 1],
- append_batch_size=False,
- dtype="int64")
-
- Here the middle dimension of :code:`sentence` is batch size. This type of data layout is applied in fixed-length recurrent neural networks.
-
.. _user_guide_paddle_support_data_types_en:
Data types supported by Fluid
diff --git a/doc/fluid/advanced_guide/data_preparing/reader.md b/doc/fluid/advanced_guide/data_preparing/reader.md
index bfba87966ea57c4c0d5a166077185b55727b024d..8647dd45bef5be20b41ded78e70e850bc98c2c7d 100644
--- a/doc/fluid/advanced_guide/data_preparing/reader.md
+++ b/doc/fluid/advanced_guide/data_preparing/reader.md
@@ -193,14 +193,3 @@ def image_reader_creator(image_path, label_path, n):
reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024)
paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...)
```
-
-### How is `paddle.train` implemented
-
-An example implementation of paddle.train is:
-
-```python
-def train(batch_reader, mapping, batch_size, total_pass):
- for pass_idx in range(total_pass):
- for mini_batch in batch_reader(): # this loop will never end in online learning.
- do_forward_backward(mini_batch, mapping)
-```
diff --git a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst
index 5509d34403aedd8fd92dd3978fed10b723073d0a..1988aee0ae578f584b723bdf38010945b264320d 100644
--- a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst
+++ b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start.rst
@@ -14,7 +14,7 @@
*
- [x] 成功安装Paddle Fluid,如果尚未安装,请参考 `快速开始 `_
+ [x] 成功安装Paddle Fluid,如果尚未安装,请参考 `快速开始 `_
*
[x] 学会最基本的单机训练方法,请参考 `单机训练 `_ 中描述的单卡训练,进行学习
@@ -113,7 +113,7 @@
main_function(args.is_local)
-* 说明:示例中使用的IO方法是dataset,想了解具体的文档和用法请参考 `Dataset API `_ 。示例中使用的 ``train_from_dataset`` 接口,想了解具体的文档和使用方法请参考 `Executor API `_ 。示例中的 ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` 表示引入参数服务器架构进行分布式训练,如果想更进一步了解Fleet API的更多选项和示例,请参考 `Fleet API `_
+* 说明:示例中使用的IO方法是dataset,想了解具体的文档和用法请参考 `Dataset API `_ 。示例中使用的 ``train_from_dataset`` 接口,想了解具体的文档和使用方法请参考 `Executor API `_ 。示例中的 ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet`` 表示引入参数服务器架构进行分布式训练,如果想更进一步了解Fleet API的更多选项和示例,请参考 `Fleet API `_
单机训练启动命令
diff --git a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst
index ad9868e38bf4c3f4b953749db45064436c972661..ff8ea39c02200f8397c9d3bd9454fd6d01214f51 100644
--- a/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst
+++ b/doc/fluid/advanced_guide/distributed_training/cluster_quick_start_en.rst
@@ -1,193 +1,159 @@
-.. _cluster_quick_start_en:
+Quick start for distributed training
+====================================
-Quick Start with Distributed Training
-==========================
+Distributed training with Fleet API
+-----------------------------------
-Preparation
---------------------
-In this article, we'll show you how to quickly start a PaddlePaddle distributed training task in a cluster. Before you start, do some preparatory work as follows:
-
-1. Prepare a connected training cluster. Here we use 4 training nodes with format ``*.paddlepaddle.com`` to represent the host name of the node. You can modify it according to the actual situation.
-
-2. Make sure you have read :ref:`install_steps` before you start and can run PaddlePaddle on all nodes of the cluster.
+Since Paddle Fluid `Release
+1.5.1 `__,
+it is officially recommended to use the Fleet API for distributed
+training. For the introduction of the Fleet API, please refer to `Fleet
+Design Doc `__.
-Example code
--------------
-
-Let's use a very simple linear regression model as an example to explain how to start a distributed training task with 2 pserver server nodes and 2 trainer nodes. You can save this code as ``dist_train.py`` .
+Preparation
+~~~~~~~~~~~
+
+- [x] Install Paddle Fluid. If not already installed, please refer to
+ `Beginner’s
+ Guide `__.
+- [x] Master the most basic single node training method. Please refer
+ to the single card training described in `Single-node
+ training `__.
+
+Click-through rate prediction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here, we will use a simple example, click-through rate prediction task,
+to illustrate how to configure Fleet API for distributed training, and
+gives an example by using a single node environment to simulate the
+distributed environment. The source code of the example comes from `CTR
+with
+Fleet `__.
+
+In order to facilitate learning, the example given here is a mixed code
+of single node and multi node. You can start single node or multi node
+tasks through different startup commands. For the part of obtaining data
+and the logic of data preprocessing, please refer to the source code and
+description of `CTR with
+Fleet `__.
.. code:: python
-
+ from __future__ import print_function
+ from args import parse_args
import os
- import paddle
import paddle.fluid as fluid
-
- # train reader
- BATCH_SIZE = 20
- EPOCH_NUM = 30
- BATCH_SIZE = 8
-
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.train(), buf_size=500),
- batch_size=BATCH_SIZE)
-
- def train():
- y = fluid.layers.data(name='y', shape=[1], dtype='float32')
- x = fluid.layers.data(name='x', shape=[13], dtype='float32')
- y_predict = fluid.layers.fc(input=x, size=1, act=None)
-
- loss = fluid.layers.square_error_cost(input=y_predict, label=y)
- avg_loss = fluid.layers.mean(loss)
- opt = fluid.optimizer.SGD(learning_rate=0.001)
- opt.minimize(avg_loss)
-
- place = fluid.CPUPlace()
- feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
- exe = fluid.Executor(place)
-
- # fetch distributed training environment setting
- training_role = os.getenv("PADDLE_TRAINING_ROLE", None)
- port = os.getenv("PADDLE_PSERVER_PORT", "6174")
- pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
- trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
- eplist = []
- for ip in pserver_ips.split(","):
- eplist.append(':'.join([ip, port]))
- pserver_endpoints = ",".join(eplist)
- trainers = int(os.getenv("PADDLE_TRAINERS"))
- current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
-
- t = fluid.DistributeTranspiler()
- t.transpile(
- trainer_id = trainer_id,
- pservers = pserver_endpoints,
- trainers = trainers)
-
- if training_role == "PSERVER":
- pserver_prog = t.get_pserver_program(current_endpoint)
- startup_prog = t.get_startup_program(current_endpoint, pserver_prog)
- exe.run(startup_prog)
- exe.run(pserver_prog)
- elif training_role == "TRAINER":
- trainer_prog = t.get_trainer_program()
+ import sys
+ from network_conf import ctr_dnn_model_dataset
+ import paddle.fluid.incubate.fleet.base.role_maker as role_maker
+
+ from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
+ from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
+
+ dense_feature_dim = 13
+ sparse_feature_dim = 10000001
+ batch_size = 100
+ thread_num = 10
+ embedding_size = 10
+ args = parse_args()
+
+ def main_function(is_local):
+ # common code for local training and distributed training
+ dense_input = fluid.layers.data(
+ name="dense_input", shape=[dense_feature_dim], dtype='float32')
+
+ sparse_input_ids = [
+ fluid.layers.data(name="C" + str(i), shape=[1], lod_level=1,
+ dtype="int64") for i in range(1, 27)]
+
+ label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_use_var([dense_input] + sparse_input_ids + [label])
+ pipe_command = "python criteo_reader.py %d" % sparse_feature_dim
+ dataset.set_pipe_command(pipe_command)
+ dataset.set_batch_size(batch_size)
+ dataset.set_thread(thread_num)
+
+ whole_filelist = ["raw_data/part-%d" % x
+ for x in range(len(os.listdir("raw_data")))]
+
+ dataset.set_filelist(whole_filelist)
+ loss, auc_var, batch_auc_var = ctr_dnn_model_dataset(
+ dense_input, sparse_input_ids, label, embedding_size,
+ sparse_feature_dim)
+
+ exe = fluid.Executor(fluid.CPUPlace())
+ def train_loop(epoch=20):
+ for i in range(epoch):
+ exe.train_from_dataset(program=fluid.default_main_program(),
+ dataset=dataset,
+ fetch_list=[auc_var],
+ fetch_info=["auc"],
+ debug=False)
+ # local training
+ def local_train():
+ optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
+ optimizer.minimize(loss)
exe.run(fluid.default_startup_program())
-
- for epoch in range(EPOCH_NUM):
- for batch_id, batch_data in enumerate(train_reader()):
- avg_loss_value, = exe.run(trainer_prog,
- feed=feeder.feed(batch_data),
- fetch_list=[avg_loss])
- if (batch_id + 1) % 10 == 0:
- print("Epoch: {0}, Batch: {1}, loss: {2}".format(
- epoch, batch_id, avg_loss_value[0]))
- # destory the resource of current trainer node in pserver server node
- exe.close()
+ train_loop()
+
+ # distributed training
+ def dist_train():
+ role = role_maker.PaddleCloudRoleMaker()
+ fleet.init(role)
+ strategy = DistributeTranspilerConfig()
+ strategy.sync_mode = False
+ optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
+ optimizer = fleet.distributed_optimizer(optimizer, strategy)
+ optimizer.minimize(loss)
+
+ if fleet.is_server():
+ fleet.init_server()
+ fleet.run_server()
+ elif fleet.is_worker():
+ fleet.init_worker()
+ exe.run(fluid.default_startup_program())
+ train_loop()
+ if is_local:
+ local_train()
else:
- raise AssertionError("PADDLE_TRAINING_ROLE should be one of [TRAINER, PSERVER]")
-
- train()
-
-
-Environment Variables
-------------------------------------
-
-When starting a distributed training task, different environment variables are used to represent different node roles, details as follows:
-
-.. list-table::
- :header-rows: 1
-
- * - Environment Variable
- - Data Type
- - Example
- - Description
- * - :code:`PADDLE_TRAINING_ROLE`
- - str
- - :code:`PSERVER,TRANERR`
- - role of current training node
- * - :code:`PADDLE_PSERVER_IPS`
- - str
- - :code:`ps0.paddlepaddle.com, ps1.paddlepaddle.com`
- - The IP addresses or hostnames of all pserver nodes in the distributed training task, separated by ","
- * - :code:`PADDLE_PSERVER_PORT`
- - int
- - 6174
- - port that the pserver process listens to
- * - :code:`PADDLE_TRAINERS`
- - int
- - 2
- - Number of trainer nodes in a distributed training task
- * - :code:`PADDLE_CURRENT_IP`
- - str
- - :code:`ps0.paddlepaddle.com`
- - IP address or hostname of the current pserver node
- * - :code:`PADDLE_TRAINER_ID`
- - str
- - 0
- - ID of the current trainer node (unique), in the range of [0, PADDLE_TRAINERS)
-
-**Note:** Environment variables are just a way to get runtime information. In practical tasks, you can use command line parameters to obtain runtime information.
-
-API related to Distributed Training
----------------------------------
-
-DistributeTranspiler
-~~~~~~~~~~~~~~~~~~~~~~
-
-The machines in distributed training tasks based on the pserver-trainer architecture are divided into two roles: Parameter Server (pserver) and trainer. In Fluid, users only need to configure the network configuration required for single node training. The ``DistributeTranspiler`` module automatically modifies the single-node network settings into settings on which pserver and trainer needs to run based on the role of current training node:
+ dist_train()
-.. code:: python
+ if __name__ == '__main__':
+ main_function(args.is_local)
- t = fluid.DistributeTranspiler()
- t.transpile(
- trainer_id = trainer_id,
- pservers = pserver_endpoints,
- trainers = trainers)
- if PADDLE_TRAINING_ROLE == "TRAINER":
- # fetch the trainer program and execute it
- trainer_prog = t.get_trainer_program()
- ...
+- Note: The IO method used in this example is dataset, please refer to
+ `Dataset
+ API `__
+ for specific documents and usage. For the ``train_from_dataset``
+ interface, please refer to `Executor
+ API `__.
+ ``from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet``
+ in this example means that the parameter server architecture is used for
+ distributed training. You can refer to `Fleet
+ API `__
+ to learn more about the options and examples of the Fleet API.
- elif PADDLE_TRAINER_ROLE == "PSERVER":
- # fetch the pserver program and execute it
- pserver_prog = t.get_pserver_program(current_endpoint)
- ...
+Start command of single node training
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. code:: bash
-Exe.close()
-~~~~~~~~~~~~~~
+ python train.py --is_local 1
+Start command of single machine simulation distributed training
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The status information of all trainer nodes is saved in the pserver node. When trainer finishes training, ``exe.close()`` should be called to notify all PServer nodes to release the resources of the current Trainer nodes:
+Here we use launch\_ps, a built-in launcher of paddle, with which users can
+specify the number of workers and servers to start the parameter server
+tasks.
-.. code:: python
+.. code:: bash
+
+ python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py
- exe = fluid.Executor(fluid.CPUPlace())
- # training process ...
- exe.close() # notify PServer to destory the resource
-
-Note: every trainer needs to call exe.close() when the trainer finishes.
-
-Start a Distributed Training Task
-----------------------------------
-
-.. list-table::
- :header-rows: 1
-
-
- * - Start Node
- - Start Command
- - Description
- * - ps0.paddlepaddle.com
- - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps0.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
- - Start pserver node
- * - ps1.paddlepaddle.com
- - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps1.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
- - Start pserver node
- * - trainer0.paddlepaddle.com
- - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=0 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
- - Start the number 0 Trainer Node
- * - trainer1.paddlepaddle.com
- - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com, ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=1 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
- - Start the number 1 trainer node
+The running log of the task can be viewed in the logs directory under the
+working directory. Once you can simulate distributed training on a single
+machine, you can perform true multi-node distributed training. We
+recommend that users refer directly to the
+`example of running a distributed task on Baidu Cloud `__.
diff --git a/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst b/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst
index 15d5508a914563dc20e1a33cac352dee7352213f..21f3ea861452ee7bf1dc12fdd65bd8fdcbc8ea6c 100644
--- a/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst
+++ b/doc/fluid/advanced_guide/distributed_training/fleet_api_howto_cn.rst
@@ -51,8 +51,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用
from nets import mlp
from utils import gen_data
- input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
- input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
+ input_x = fluid.data(name="x", shape=[None, 32], dtype='float32')
+ input_y = fluid.data(name="y", shape=[None, 1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
@@ -79,8 +79,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用
from paddle.fluid.incubate.fleet.base import role_maker
from utils import gen_data
- input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
- input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
+ input_x = fluid.data(name="x", shape=[None, 32], dtype='float32')
+ input_y = fluid.data(name="y", shape=[None, 1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
@@ -119,8 +119,8 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用
from paddle.fluid.incubate.fleet.base import role_maker
from utils import gen_data
- input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
- input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
+ input_x = fluid.data(name="x", shape=[None, 32], dtype='float32')
+ input_y = fluid.data(name="y", shape=[None, 1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
diff --git a/doc/fluid/advanced_guide/distributed_training/index_cn.rst b/doc/fluid/advanced_guide/distributed_training/index_cn.rst
index 79daa2718e7d2a4a9205cebd34a654bda56b6264..1d2ad003bfd174ebd5c903e68acf3b3001fe3dfb 100644
--- a/doc/fluid/advanced_guide/distributed_training/index_cn.rst
+++ b/doc/fluid/advanced_guide/distributed_training/index_cn.rst
@@ -6,5 +6,4 @@
:maxdepth: 1
cluster_quick_start.rst
- cluster_howto.rst
fleet_api_howto_cn.rst
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md b/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..40bc3e002eadc30f03383cf4d262931179001322
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/debugging_cn.md
@@ -0,0 +1,204 @@
+# 调试方法
+
+本节内容将介绍动态图转静态图(下文简称:动转静)推荐的几种调试方法。
+
+> **注解:**
+>
+> 请确保转换前的动态图代码能够成功运行,建议使用 [paddle.jit.ProgramTranslator().enable(False)](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable)关闭动转静功能,直接运行动态图,如下:
+
+```python
+import paddle
+import numpy as np
+paddle.disable_static()
+# 关闭动转静功能
+paddle.jit.ProgramTranslator().enable(False)
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+func(np.ones([3, 2]))
+```
+
+## 断点调试
+使用动转静功能时,您可以使用断点调试代码。
+例如,在代码中,调用 `pdb.set_trace()`:
+```Python
+import pdb
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ pdb.set_trace()
+ if x > 3:
+ x = x - 1
+ return x
+```
+执行以下代码,将会在转化后的静态图代码中使用调试器:
+```Python
+func(np.ones([3, 2]))
+```
+
+运行结果:
+```bash
+> /tmp/tmpR809hf.py(6)func()
+-> def true_fn_0(x):
+(Pdb) n
+> /tmp/tmpR809hf.py(6)func()
+-> def false_fn_0(x):
+...
+```
+
+如果您想在原始的动态图代码中使用调试器,请先调用 [`paddle.jit.ProgramTranslator().enable(False)`](../../api_cn/dygraph_cn/ProgramTranslator_cn.html#enable),如下:
+```python
+paddle.jit.ProgramTranslator().enable(False)
+func(np.ones([3, 2]))
+```
+运行结果:
+```bash
+> (10)func()
+-> if x > 3:
+...
+
+```
+
+## 打印转换后的代码
+您可以打印转换后的静态图代码,有2种方法:
+
+1. 使用被装饰后的函数的 `code` 属性
+ 如下代码中,装饰器 `paddle.jit.to_static` 会将函数 `func` 转化为一个类对象 `StaticLayer`,可以使用 StaticLayer 的 `code` 属性来获得转化后的代码。
+ ```Python
+ @paddle.jit.to_static
+ def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+ print(func.code)
+ ```
+ 运行结果:
+
+ ```bash
+
+ def func(x):
+ x = fluid.layers.assign(x)
+
+ def true_fn_0(x):
+ x = x - 1
+ return x
+
+ def false_fn_0(x):
+ return x
+ x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x >
+ 3, true_fn_0, false_fn_0, (x,), (x,), (x,))
+ return x
+ ```
+
+2. 使用 `set_code_level(level)` 或环境变量 `TRANSLATOR_CODE_LEVEL=level`
+
+ 通过调用 `set_code_level` 或设置环境变量 `TRANSLATOR_CODE_LEVEL`,可以在日志中查看转换后的代码:
+
+ ```python
+ @paddle.jit.to_static
+ def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+ paddle.jit.set_code_level() # 也可设置 os.environ["TRANSLATOR_CODE_LEVEL"] = '100',效果相同
+ func(np.ones([1]))
+ ```
+ 运行结果:
+
+ ```bash
+ 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code:
+ def func(x):
+ x = fluid.layers.assign(x)
+
+ def true_fn_0(x):
+ x = x - 1
+ return x
+
+ def false_fn_0(x):
+ return x
+ x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x >
+ 3, true_fn_0, false_fn_0, (x,), (x,), (x,))
+ return x
+ ```
+ `set_code_level` 函数可以设置查看不同的AST Transformer转化后的代码,详情请见 [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_cn.html)。
+
+## 使用 `print`
+`print` 函数可以用来查看变量,该函数在动转静中会被转化。当仅打印 Paddle Tensor 时,实际运行时会被转换为 Paddle 算子 [Print](../../api_cn/layers_cn/Print_cn.html),否则仍然运行 `print`。
+```python
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+
+ # 打印x,x是Paddle Tensor,实际运行时会运行Paddle Print(x)
+ print(x)
+
+ # 打印注释,非Paddle Tensor,实际运行时仍运行print
+ print("Here call print function.")
+
+ if len(x) > 3:
+ x = x - 1
+ else:
+ x = paddle.ones(shape=[1])
+ return x
+
+func(np.ones([1]))
+```
+
+运行结果:
+```bash
+Variable: assign_0.tmp_0
+ - lod: {}
+ - place: CPUPlace
+ - shape: [1]
+ - layout: NCHW
+ - dtype: double
+ - data: [1]
+Here call print function.
+```
+
+## 日志打印
+ProgramTranslator在日志中记录了额外的调试信息,以帮助您了解动转静过程中函数是否被成功转换。
+您可以调用 [`paddle.jit.set_verbosity(level)`](../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_cn.html) 或设置环境变量 `TRANSLATOR_VERBOSITY=level` 来设置日志详细等级,并查看不同等级的日志信息。目前,`level` 可以取值0-3:
+- 0: 无日志
+- 1: 包括了动转静转化流程的信息,如转换前的源码、转换的可调用对象
+- 2: 包括以上信息,还包括更详细函数转化日志
+- 3: 包括以上信息,以及更详细的动转静日志
+
+> **注意:**
+>
+> 日志中包括了源代码等信息,请在共享日志前确保它不包含敏感信息。
+
+可以在代码运行前调用 `paddle.jit.set_verbosity` 控制日志详细程度:
+```python
+paddle.jit.set_verbosity(3)
+```
+或者设置环境变量 `TRANSLATOR_VERBOSITY`:
+```python
+import os
+os.environ["TRANSLATOR_VERBOSITY"] = '3'
+```
+
+运行结果:
+```bash
+2020-XX-XX 00:00:00,123-Level 1: Source code:
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ if len(x) > 3:
+ x = x - 1
+ else:
+ x = paddle.ones(shape=[1])
+ return x
+
+2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert .
+```
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md b/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ca87c976b0a9fc30dc588d1b4a8f814463f42de
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/debugging_en.md
@@ -0,0 +1,202 @@
+# Debugging Methods
+
+This section will introduce several debugging methods recommended by Dynamic Graph to Static Graph (hereafter called Dynamic-to-Staic).
+
+> **NOTE:**
+>
+> Please ensure that the dynamic graph code before transformation can run successfully. It is recommended to call [paddle.jit.ProgramTranslator().enable(False)](../../api/dygraph/ProgramTranslator_en.html#enable) to disable Dynamic-to-Static, and run dynamic graph code as follows:
+
+
+```python
+import paddle
+import numpy as np
+paddle.disable_static()
+
+# Disable Dynamic-to-Static
+paddle.jit.ProgramTranslator().enable(False)
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+func(np.ones([3, 2]))
+```
+
+## Breakpoint Debugging
+When using Dynamic-to-Static, you can use breakpoints to debug.
+
+For example, call `pdb.set_trace()` in your code:
+```Python
+import pdb
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ pdb.set_trace()
+ if x > 3:
+ x = x - 1
+ return x
+```
+Executing the following code will land the debugger in the transformed static graph code:
+```Python
+func(np.ones([3, 2]))
+```
+
+```bash
+> /tmp/tmpR809hf.py(6)func()
+-> def true_fn_0(x):
+(Pdb) n
+> /tmp/tmpR809hf.py(6)func()
+-> def false_fn_0(x):
+...
+```
+
+Calling [`paddle.jit.ProgramTranslator().enable(False)`](../../api/dygraph/ProgramTranslator_en.html#enable) before executing the code will land the debugger in the original dynamic graph code:
+```python
+paddle.jit.ProgramTranslator().enable(False)
+func(np.ones([3, 2]))
+```
+
+```bash
+> (10)func()
+-> if x > 3:
+...
+
+```
+
+## Print Transformed Code
+
+There are two ways to print the transformed static graph code:
+
+1. Use the attribute `code` of the decorated function
+
+ In the following code, the decorator `paddle.jit.to_static` transforms `func` into a class object `StaticLayer`. You can use the `code` attribute of `StaticLayer` to get the transformed code.
+ ```Python
+ @paddle.jit.to_static
+ def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+ print(func.code)
+ ```
+ ```bash
+
+ def func(x):
+ x = fluid.layers.assign(x)
+
+ def true_fn_0(x):
+ x = x - 1
+ return x
+
+ def false_fn_0(x):
+ return x
+ x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x >
+ 3, true_fn_0, false_fn_0, (x,), (x,), (x,))
+ return x
+ ```
+2. Call `set_code_level(level)` or set environment variable `TRANSLATOR_CODE_LEVEL=level`
+
+ You can view the transformed code in the log by calling `set_code_level` or setting the environment variable `TRANSLATOR_CODE_LEVEL`.
+
+ ```python
+ @paddle.jit.to_static
+ def func(x):
+ x = paddle.to_tensor(x)
+ if x > 3:
+ x = x - 1
+ return x
+
+ paddle.jit.set_code_level() # the same effect to set os.environ["TRANSLATOR_CODE_LEVEL"] = '100'
+ func(np.ones([1]))
+ ```
+
+ ```bash
+ 2020-XX-XX 00:00:00,980-INFO: After the level 100 ast transformer: 'All Transformers', the transformed code:
+ def func(x):
+ x = fluid.layers.assign(x)
+
+ def true_fn_0(x):
+ x = x - 1
+ return x
+
+ def false_fn_0(x):
+ return x
+ x = fluid.dygraph.dygraph_to_static.convert_operators.convert_ifelse(x >
+ 3, true_fn_0, false_fn_0, (x,), (x,), (x,))
+ return x
+ ```
+ `set_code_level` can be set to different levels to view the code transformed by different AST transformers. For details, please refer to [set_code_level](../../../paddle/api/paddle/fluid/dygraph/jit/set_code_level_en.html).
+
+## `print`
+You can call `print` to view variables. `print` will be transformed when using Dynamic-to-Static. When the printed argument is a Paddle Tensor, `print` is transformed into the Paddle operator [Print](../../api/layers/Print.html) at runtime. Otherwise, the Python `print` is called.
+
+```python
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ # x is a Paddle Tensor, so it will run Paddle Print(x) actually.
+ print(x)
+
+ # The string is not a Paddle Tensor, so it will run print as-is.
+ print("Here call print function.")
+
+ if len(x) > 3:
+ x = x - 1
+ else:
+ x = paddle.ones(shape=[1])
+ return x
+
+func(np.ones([1]))
+```
+
+```bash
+Variable: assign_0.tmp_0
+ - lod: {}
+ - place: CPUPlace
+ - shape: [1]
+ - layout: NCHW
+ - dtype: double
+ - data: [1]
+Here call print function.
+```
+
+## Log Printing
+ProgramTranslator can log additional debugging information to help you know whether the function was successfully transformed or not.
+
+You can call [`paddle.jit.set_verbosity(level)`](../../../paddle/api/paddle/fluid/dygraph/jit/set_verbosity_en.html) or set environment variable `TRANSLATOR_VERBOSITY=level` to enable logging and view logs of different levels. The argument `level` varies from 0 to 3:
+- 0: no logging
+- 1: includes information about the Dynamic-to-Static transformation process, such as the source code before transformation, the callable objects to be transformed, and so on
+- 2: includes above and more detailed function transformation logs
+- 3: includes above and extremely verbose logging
+
+> **WARNING:**
+>
+> The logs includes information such as source code. Please make sure logs don't contain any sensitive information before sharing them.
+
+You can call `paddle.jit.set_verbosity` to control the verbosity level of logs:
+```python
+paddle.jit.set_verbosity(3)
+```
+or use the environment variable `TRANSLATOR_VERBOSITY`:
+```python
+import os
+os.environ["TRANSLATOR_VERBOSITY"] = '3'
+```
+
+```bash
+2020-XX-XX 00:00:00,123-Level 1: Source code:
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ if len(x) > 3:
+ x = x - 1
+ else:
+ x = paddle.ones(shape=[1])
+ return x
+
+2020-XX-XX 00:00:00,152-Level 1: Convert callable object: convert .
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..bb92cb80aa7a2485e2203177be2b3a4813602d91
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_cn.md
@@ -0,0 +1,160 @@
+# 报错信息处理
+
+本节内容将介绍使用动态图转静态图(下文简称:动转静)功能发生异常时,[ProgramTranslator](./program_translator_cn.html)对报错信息做的处理,以帮助您更好地理解动转静报错信息。使用动转静功能运行动态图代码时,内部可以分为2个步骤:动态图代码转换成静态图代码,运行静态图代码。接下来将分别介绍这2个步骤中的异常报错情况。
+
+## 动转静过程中的异常
+在动态图代码转换成静态图代码的过程中,如果ProgramTranslator无法转换某个函数,将会显示警告信息,并尝试直接运行该函数。
+如下代码中,函数 `inner_func` 在调用前被转换成静态图代码,当 `x = inner_func(data)` 调用该函数时,不能重复转换,会给出警告信息:
+
+```python
+import paddle
+import numpy as np
+
+paddle.disable_static()
+
+@paddle.jit.to_static
+def func():
+ def inner_func(x):
+ x_tensor = paddle.to_tensor(x)
+ return x_tensor
+ data = np.ones([3]).astype("int32")
+ x = inner_func(data)
+ return x
+func()
+```
+
+ProgramTranslator打印的警告信息如下:
+
+```bash
+WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is.
+```
+
+## 运行转换后的代码报错
+
+如果在动转静后的静态图代码中发生异常,ProgramTranslator 会捕获该异常,增强异常报错信息,将静态图代码报错行映射到转换前的动态图代码,并重新抛出该异常。
+重新抛出的异常具有以下特点:
+
+- 隐藏了部分对用户无用的动转静过程调用栈;
+- 转换前的代码会给出提示:"In user code:";
+- 报错信息中包含了转换前的原始动态图代码;
+
+例如,运行以下代码,在静态图构建时,即编译期会抛出异常:
+
+```python
+import paddle
+import numpy as np
+
+paddle.disable_static()
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ x = paddle.reshape(x, shape=[-1, -1])
+ return x
+
+func(np.ones([3, 2]))
+```
+
+运行结果:
+```bash
+Traceback (most recent call last):
+ in ()
+ func(np.ones([3, 2]))
+ File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__
+ raise new_exception
+AssertionError: In user code:
+
+ File "", line 7, in func
+ x = fluid.layers.reshape(x, shape=[-1, -1])
+ File "paddle/fluid/layers/nn.py", line 6193, in reshape
+ attrs["shape"] = get_attr_shape(shape)
+ File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape
+ "be -1. But received shape[%d] is also -1." % dim_idx)
+ AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1.
+```
+
+上述报错信息可以分为3点:
+
+1. 报错栈中,涉及代码转换过程的信息栈默认会被隐藏,不进行展示,以减少干扰信息。
+
+2. ProgramTranslator处理后的报错信息中,会包含提示"In user code:",表示之后的报错栈中,包含动转静前的动态图代码,即用户写的代码:
+ ```bash
+ AssertionError: In user code:
+
+ File "", line 7, in func
+ x = fluid.layers.reshape(x, shape=[-1, -1])
+ File "paddle/fluid/layers/nn.py", line 6193, in reshape
+ attrs["shape"] = get_attr_shape(shape)
+ File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape
+ "be -1. But received shape[%d] is also -1." % dim_idx)
+ ```
+ 其中,`File "", line 7, in func` 是转换前的代码位置信息,`x = fluid.layers.reshape(x, shape=[-1, -1])` 是转换前的代码。
+
+3. 新的异常中,包含原始报错中的报错信息,如下:
+ ```bash
+ AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1.
+ ```
+
+运行以下代码,在静态图运行时,即运行期会抛出异常:
+
+```Python
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ two = paddle.fill_constant(shape=[1], value=2, dtype="int32")
+ x = paddle.reshape(x, shape=[1, two])
+ return x
+
+func(np.ones([3]).astype("int32"))
+```
+
+运行结果:
+
+```bash
+Traceback (most recent call last):
+ File "", line 10, in ()
+ func(np.ones([3]).astype("int32"))
+ File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__
+ raise new_exception
+
+EnforceNotMet: In user code:
+
+ File "", line 7, in func
+ x = paddle.reshape(x, shape=[1, two])
+ File "paddle/tensor/manipulation.py", line 1347, in reshape
+ return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
+ File "paddle/fluid/layers/nn.py", line 6209, in reshape
+ "XShape": x_shape})
+ File "paddle/fluid/layer_helper.py", line 43, in append_op
+ return self.main_program.current_block().append_op(*args, **kwargs)
+ File "paddle/fluid/framework.py", line 2880, in append_op
+ attrs=kwargs.get("attrs", None))
+ File "paddle/fluid/framework.py", line 1977, in __init__
+ for frame in traceback.extract_stack():
+
+--------------------------------------
+C++ Traceback (most recent call last):
+--------------------------------------
+0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool)
+1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&)
+2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&)
+3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
+4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const
+5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool)
+6 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
+7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
+8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const
+9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const
+10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&)
+11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
+12 paddle::platform::GetCurrentTraceBackString()
+
+----------------------
+Error Message Summary:
+----------------------
+InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2.
+ [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] (at /paddle/paddle/fluid/operators/reshape_op.cc:206)
+ [operator < reshape2 > error] [operator < run_program > error]
+```
+
+上述异常中,除了隐藏部分报错栈、报错定位到转换前的动态图代码外,报错信息中包含了C++报错栈 `C++ Traceback` 和 `Error Message Summary`,这是 Paddle 的 C++ 端异常信息,经处理后在 Python 的异常信息中显示。
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..22c9eb8b37a93c68c0e20a83844be14a560bd6a7
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/error_handling_en.md
@@ -0,0 +1,160 @@
+# Error Handling
+
+This section introduces the error information reported when an exception occurs, to help you better understand Dynamic-to-Static error messages.
+When running dynamic graph code with Dynamic-to-Static, the internal procedure can be divided into two steps: the dynamic graph code is transformed into static graph code, and then the static graph code is run. We will introduce the error reporting in each of these two steps.
+
+## Exceptions in Dynamic-to-Static Transformation
+
+If ProgramTranslator cannot transform a function, it will display a warning message and try to run the function as-is.
+
+In the following code, the function `inner_func` is transformed before calling. When calling `inner_func` in `x = inner_func(data)`, it is not allowed to transform repeatedly, and a warning message will be given:
+
+```python
+import paddle
+import numpy as np
+
+paddle.disable_static()
+
+@paddle.jit.to_static
+def func():
+ def inner_func(x):
+ x_tensor = paddle.to_tensor(x)
+ return x_tensor
+ data = np.ones([3]).astype("int32")
+ x = inner_func(data)
+ return x
+func()
+```
+
+The warning message is as follows:
+```bash
+WARNING: doesn't have to be transformed to static function because it has been transformed before, it will be run as-is.
+```
+## Exceptions in Running Transformed Code
+
+When an exception occurs in the code transformed by ProgramTranslator, ProgramTranslator catches the exception and augments the error message: it maps the error line in the static graph code back to the un-transformed dynamic graph code, and then re-raises the exception.
+
+Among the features of the re-raised exception:
+
+- Some useless call stacks of Dynamic-to-Static are hidden;
+- A prompt will be given before the un-transformed code: "In user code:";
+- The error message includes references to the original dynamic graph code before transformation;
+
+For example, if executing the following code, an exception is raised when the static graph is built, that is, at compile time:
+
+```python
+import paddle
+import numpy as np
+
+paddle.disable_static()
+
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ x = paddle.reshape(x, shape=[-1, -1])
+ return x
+
+func(np.ones([3, 2]))
+```
+
+```bash
+Traceback (most recent call last):
+ in ()
+ func(np.ones([3, 2]))
+ File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__
+ raise new_exception
+AssertionError: In user code:
+
+ File "", line 7, in func
+ x = fluid.layers.reshape(x, shape=[-1, -1])
+ File "paddle/fluid/layers/nn.py", line 6193, in reshape
+ attrs["shape"] = get_attr_shape(shape)
+ File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape
+ "be -1. But received shape[%d] is also -1." % dim_idx)
+ AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1.
+```
+
+The above error information can be divided into three points:
+
+1. In the error stack, the call stacks related to the code transformation process are hidden by default and not displayed, so as to avoid confusion.
+
+2. In the error message processed by ProgramTranslator, a prompt "In user code:" will be included, which means that the following error stacks contains the original dynamic graph code, that is, the code written by the user:
+
+ ```bash
+ AssertionError: In user code:
+
+ File "", line 7, in func
+ x = fluid.layers.reshape(x, shape=[-1, -1])
+ File "paddle/fluid/layers/nn.py", line 6193, in reshape
+ attrs["shape"] = get_attr_shape(shape)
+ File "paddle/fluid/layers/nn.py", line 6169, in get_attr_shape
+ "be -1. But received shape[%d] is also -1." % dim_idx)
+ ```
+ `File "", line 7, in func` is the location information of un-transformed code, `x = fluid.layers.reshape(x, shape=[-1, -1])` is the un-transformed code.
+
+3. The new exception contains the message that the exception originally reported, as follows:
+ ```bash
+ AssertionError: Only one dimension value of 'shape' in reshape can be -1. But received shape[1] is also -1.
+ ```
+
+If you execute the following code, an exception is raised when the static graph is executed, that is, at runtime:
+
+```Python
+@paddle.jit.to_static
+def func(x):
+ x = paddle.to_tensor(x)
+ two = paddle.fill_constant(shape=[1], value=2, dtype="int32")
+ x = paddle.reshape(x, shape=[1, two])
+ return x
+
+func(np.ones([3]).astype("int32"))
+```
+
+```bash
+Traceback (most recent call last):
+ File "", line 10, in ()
+ func(np.ones([3]).astype("int32"))
+ File "paddle/fluid/dygraph/dygraph_to_static/program_translator.py", line 332, in __call__
+ raise new_exception
+
+EnforceNotMet: In user code:
+
+ File "", line 7, in func
+ x = paddle.reshape(x, shape=[1, two])
+ File "paddle/tensor/manipulation.py", line 1347, in reshape
+ return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
+ File "paddle/fluid/layers/nn.py", line 6209, in reshape
+ "XShape": x_shape})
+ File "paddle/fluid/layer_helper.py", line 43, in append_op
+ return self.main_program.current_block().append_op(*args, **kwargs)
+ File "paddle/fluid/framework.py", line 2880, in append_op
+ attrs=kwargs.get("attrs", None))
+ File "paddle/fluid/framework.py", line 1977, in __init__
+ for frame in traceback.extract_stack():
+
+--------------------------------------
+C++ Traceback (most recent call last):
+--------------------------------------
+0 paddle::imperative::Tracer::TraceOp(std::string const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap, paddle::platform::Place const&, bool)
+1 paddle::imperative::OpBase::Run(paddle::framework::OperatorBase const&, paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&, paddle::platform::Place const&)
+2 paddle::imperative::PreparedOp::Run(paddle::imperative::NameVarBaseMap const&, paddle::imperative::NameVarBaseMap const&, paddle::framework::AttributeMap const&)
+3 std::_Function_handler >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&)
+4 paddle::operators::RunProgramOpKernel::Compute(paddle::framework::ExecutionContext const&) const
+5 paddle::framework::Executor::RunPartialPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, long, long, bool, bool, bool)
+6 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, paddle::platform::Place const&)
+7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const
+8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const
+9 paddle::operators::ReshapeKernel::operator()(paddle::framework::ExecutionContext const&) const
+10 paddle::operators::ReshapeOp::ValidateShape(std::vector >, paddle::framework::DDim const&)
+11 paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
+12 paddle::platform::GetCurrentTraceBackString()
+
+----------------------
+Error Message Summary:
+----------------------
+InvalidArgumentError: The 'shape' in ReshapeOp is invalid. The input tensor X'size must be equal to the capacity of 'shape'. But received X's shape = [3], X's size = 3, 'shape' is [1, 2], the capacity of 'shape' is 2.
+ [Hint: Expected capacity == in_size, but received capacity:2 != in_size:3.] (at /paddle/paddle/fluid/operators/reshape_op.cc:206)
+ [operator < reshape2 > error] [operator < run_program > error]
+```
+
+In the above exception, in addition to hiding part of the error stack and locating the error in the un-transformed dynamic graph code, the error information includes the C++ error stack `C++ Traceback` and `Error Message Summary`, which come from the C++ side of Paddle and are displayed in the Python exception after processing.
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e1a0867bb021b560a5380dfb997b318e19ab0e8f
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_cn.rst
@@ -0,0 +1,122 @@
+ProgramTranslator支持的语法
+==========================
+
+ProgramTranslator本质是把Python运行语法转写为PaddlePaddle静态图代码,但是Python语法的表达能力和PaddlePaddle静态图表达能力存在不同,这使得一些代码无法被转换。
+
+本章节我们将详细讲述在动转静过程中支持转化哪些语法,不支持哪些语法,并且讲述如何改写代码能够解决语法不支持的场景。
+
+动转静支持的语法分为以下几个大类:
+
+控制流相关关键词
+------------------
+
+控制流指if-elif-else,while等能够控制程序语句执行顺序的关键字。PaddlePaddle静态图通过cond,while_loop API来实现条件判断和循环,如果动态图Python控制流的判断条件或循环条件依赖 PaddlePaddle Tensor,动转静后会被转化为等价的PaddlePaddle控制流接口,否则仍然使用Python控制流逻辑运行。在动转静过程中这些关键字的转化情况为:
+
+1. if-elif-else 条件
+
+当 ``if <条件>`` 中的条件是Tensor时,ProgramTranslator会把该if-elif-else语句转化为等价的cond API语句。否则会按普通Python if-elif-else的逻辑运行。需注意cond支持的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。
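+
+下面是一个简化示例(仅作示意):判断条件是numel为1的Tensor,动转静后该if语句会被转写为等价的cond:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    @paddle.jit.to_static
+    def abs_func(x):
+        x = paddle.to_tensor(x)
+        # x 是 numel 为 1 的 Tensor,比较结果是 bool Tensor,因此会被转写为 cond
+        if x < 0:
+            x = x * -1
+        return x
+
+    print(abs_func(np.array([-3.0]).astype('float32')))  # 值为 3 的 Tensor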
+
+2. while 循环
+
+当while循环中的条件是Tensor时,ProgramTranslator会把该while语句转化为等价的while_loop API语句,否则会按普通Python while运行。需注意while循环条件中的Tensor只能是numel为1的bool Tensor,所以请使用这种Tensor进行条件判断,其他Tensor会报错。
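+
+下面是一个简化示例(仅作示意):循环条件依赖Tensor,动转静后该while语句会被转写为等价的while_loop:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    @paddle.jit.to_static
+    def cumulate(limit):
+        limit = paddle.to_tensor(limit)
+        i = paddle.to_tensor(np.array([0.0]).astype('float32'))
+        s = paddle.to_tensor(np.array([0.0]).astype('float32'))
+        # 循环条件是 numel 为 1 的 bool Tensor,因此会被转写为 while_loop
+        while i < limit:
+            s = s + i
+            i = i + 1.0
+        return s
+
+    print(cumulate(np.array([5.0]).astype('float32')))  # 0+1+2+3+4,值为 10 的 Tensor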
+
+
+3. for 循环
+
+3.1 ``for _ in range(__)`` 循环
+
+ProgramTranslator先将其转化为等价的Python while循环,然后按while循环的逻辑进行动静转换。
+
+3.2 ``for _ in x`` 循环
+
+当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,会转化为循环中每次对应拿出x[0], x[1], ... 。
+
+3.3 ``for idx, val in enumerate(x)`` 循环
+
+当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,idx会转化为依次0,1,...的1-D Tensor。val会转化为循环中每次对应拿出x[0], x[1], ... 。
+
+4. break,continue
+
+ProgramTranslator 可以支持在循环中添加break,continue语句,其底层实现原理是对于要break,continue的部分在相应时候使用cond在一定条件下跳过执行。
+
+5. return
+
+ProgramTranslator 支持在循环,条件判断中return结果而不需要一定在函数末尾return。也能够支持return不同长度tuple和不同类型的Tensor。其底层实现原理是对return后的部分相应使用cond在一定条件下跳过执行。
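+
+下面是一个简化示例(仅作示意):return出现在if分支中,动转静会使用cond在该条件下跳过其后的代码:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    @paddle.jit.to_static
+    def relu_scalar(x):
+        x = paddle.to_tensor(x)
+        if x < 0:
+            # 提前 return,其后的 return x 在该条件下会被跳过
+            return x * 0
+        return x
+
+    print(relu_scalar(np.array([-2.0]).astype('float32')))  # 值为 0 的 Tensor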
+
+
+一些需要转化的运算类型
+------------------------
+
+1. +,-,*,/,**, >, <, >= , <=, == 等Python内置运算
+
+由于静态图重载了这些基本运算符,所以这些运算经ProgramTranslator转化后会使用相应重载的运算符,动转静支持此类运算。
+
+2. and,or,not 逻辑运算
+
+Python内置and,or,not逻辑运算关键词,ProgramTranslator在语句的运算时会判断逻辑运算关键词运行的对象是否是Tensor,如果都是Tensor,我们将其转化为静态图对应的逻辑运算接口并运行。
+
+3. 类型转化
+
+动态图中可以直接用Python的类型转化语法来转化Tensor类型。例如x是Tensor时,float(x)可以将x的类型转化为float。ProgramTranslator在运行时判断x是否是Tensor,如果是,则在动转静时使用静态图cast接口转化相应的Tensor类型。
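+
+下面是一个简化示例(仅作示意):x是Tensor时,float(x)在动转静后会被转写为cast:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    @paddle.jit.to_static
+    def cast_func(x):
+        x = paddle.to_tensor(x)
+        # x 是 int32 Tensor,动转静后 float(x) 会通过 cast 转为 float 类型
+        y = float(x) * 2.0
+        return y
+
+    print(cast_func(np.array([3]).astype('int32')))  # 值为 6.0 的 float Tensor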
+
+Python 函数相关
+---------------------
+
+1. print
+
+如果x是Tensor,在动态图模式中print(x)可以打印x的值。在动转静过程中我们把此转化为静态图的Print接口实现,使得在静态图中也能打印。如果print的参数不是Tensor,那么我们没有把相应print语句进行转写。
+
+2. len
+
+如果x是Tensor,在动态图模式中len(x)可以获得x第0维度的长度。在动转静中我们把此转化为静态图shape接口,并返回shape的第0维。另外如果x是个TensorArray,那么len(x)将会使用静态图接口control_flow.array_length返回TensorArray的长度。对于其他情况,动转静时会按照普通Python len函数运行。
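+
+下面是一个简化示例(仅作示意):x是Tensor时,len(x)在动转静后通过shape接口返回x第0维的长度:
+
+.. code-block:: python
+
+    import paddle
+    import numpy as np
+
+    paddle.disable_static()
+
+    @paddle.jit.to_static
+    def get_length(x):
+        x = paddle.to_tensor(x)
+        # x 是 Tensor,len(x) 会被转写为静态图 shape 接口并取第 0 维
+        n = len(x)
+        return n
+
+    print(get_length(np.ones([4, 2]).astype('float32')))  # 第 0 维长度为 4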
+
+3. lambda 表达式
+
+动转静允许写带有Python lambda表达式的语句,并且我们会适当改写使得返回对应结果。
+
+4. 函数内再调用函数
+
+对于函数内调用其他函数的情况,ProgramTranslator也会对内部的函数递归地进行动转静,这样做的好处是可以在最外层函数只需加一次装饰器即可,而不需要每个函数都加装饰器。但需要注意,动转静还不支持函数递归调用自己,详细原因请查看下文动转静无法正确运行的情况。
+
+报错异常相关
+--------------
+
+1. assert
+
+如果x是Tensor,在动态图中可以通过assert x来强制x为True或者非0值,在动转静中我们把此转化为静态图Assert接口支持此功能。
+
+
+Python基本容器
+---------------
+
+1. list:对于一个list如果里面元素都是Tensor,那么动转静会转化其为TensorArray,静态图TensorArray可以支持append,pop,修改操作。因此ProgramTranslator在元素皆为Tensor的list中支持上面三种操作。换言之,其他list操作,比如sort无法支持。对于list中并非所有元素是Tensor的情况,ProgramTranslator会将其作为普通Python list运行。
+
+2. dict:ProgramTranslator会将相应的dict中的Tensor添加进静态图Program,因此使用dict是动转静支持的语法。
+
+动转静无法正确运行的情况
+--------------------------
+
+1. Reshape后的变量调用其shape作为PaddlePaddle API参数。
+
+具体表现比如 ``x = reshape(x, shape=shape_tensor)`` ,再使用 ``x.shape[0]`` 的值进行其他操作。这种情况会由于动态图和静态图的本质不同而使得动态图能够运行,但静态图运行失败。其原因是动态图情况下,API是直接返回运行结果,因此 ``x.shape`` 在经过reshape运算后是确定的。但是在转化为静态图后,因为静态图API只是组网,``shape_tensor`` 的值在组网时是不知道的,所以 ``reshape`` 接口组网完,静态图并不知道 ``x.shape`` 的值。PaddlePaddle静态图用-1表示未知的shape值,此时 ``x`` 的shape每个维度会被设为-1,而不是期望的值。
+
+遇到这类情况我们建议尽量固定shape值,减少reshape操作。
+
+2. 多重list嵌套读写Tensor
+
+具体表现如 ``l = [[tensor1, tensor2], [tensor3, tensor4]]`` ,因为现在动转静将元素全是Tensor的list转化为TensorArray,而PaddlePaddle的TensorArray还不支持多维数组,因此这种情况下,动转静无法正确运行。
+
+遇到这类情况我们建议尽量用一维list,或者自己使用PaddlePaddle的create_array,array_read,array_write接口编写为TensorArray。
+
+3. Tensor值在被装饰函数中转成numpy array进行运算
+
+具体表现为在被装饰函数中没有返回Tensor时就使用 ``numpy.array(tensor)`` 将Tensor转化为numpy array并使用numpy接口进行运算。这种情况在动态图下因为Tensor有值是可以正常运行的,但是在静态图时由于Tensor只是组网变量,在没有运行时没有数值,因此无法进行numpy运算。
+
+遇到这种情况我们建议在动转静的函数中尽量使用PaddlePaddle接口替代numpy接口进行运算。
+
+4. 一个函数递归调用自己
+
+ProgramTranslator还无法支持一个函数递归调用自己,原因是递归常常会用 ``if-else`` 构造停止递归的条件。然而这样的停止条件在静态图下只是一个 ``cond`` 组网,组网并不能在编译阶段决定自己组多少次,会导致函数运行时一直组网递归直至栈溢出,因此ProgramTranslator还无法支持一个函数递归调用自己。
+
+遇到这种情况我们建议将代码改为非递归写法。
+
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c88a997165f06df3e51e01145ddfec9558aedc8
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/grammar_list_en.rst
@@ -0,0 +1,124 @@
+Supported Grammars
+==================
+
+The key part of ProgramTranslator is transforming Python grammar into PaddlePaddle static graph code, but there are differences between Python and the PaddlePaddle static graph that impose some limitations on the code transformation.
+
+In this section we will describe the supported and unsupported grammars, and give some suggestions for the unsupported cases.
+
+There are several kinds of supported grammars:
+
+Control flow keywords
+---------------------
+
+Control flow keywords are those keywords that control the execution order of program statements, for example ``if-elif-else, while`` . Conditional operations and loops are implemented as the ``cond, while_loop`` APIs in the PaddlePaddle static graph. If the condition of a Python dygraph control flow depends on a PaddlePaddle Tensor, ProgramTranslator converts the control flow into the equivalent PaddlePaddle control flow APIs, otherwise it is still executed as Python control flow. The transformations of these control flow keywords are listed below:
+
+1. ``if-elif-else`` statements
+
+If the condition of the ``if`` is a Tensor, ProgramTranslator turns the ``if-elif-else`` statement into the equivalent PaddlePaddle static graph ``cond`` statements, otherwise the ``if-elif-else`` statement is executed as a normal Python conditional statement. Note that the ``cond`` API only accepts a conditional Tensor whose numel equals 1, so please use this kind of Tensor to write dygraph conditional statements; other Tensors will cause an error.
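+
+A minimal sketch of such a Tensor-dependent conditional (the function and variable names here are illustrative, not part of the original text):
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def relu_like(x):
+        # paddle.mean(x) > 0 is a Tensor with numel 1, so this branch is
+        # converted to the static graph `cond` API by ProgramTranslator.
+        if paddle.mean(x) > 0:
+            out = x
+        else:
+            out = paddle.zeros_like(x)
+        return out
+
+    print(relu_like(paddle.to_tensor([-1.0, 2.0, 3.0])))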
+
+2. ``while`` loop
+
+If the condition of the ``while`` is a Tensor, ProgramTranslator turns the ``while`` statement into the equivalent PaddlePaddle static graph ``while_loop`` statements, otherwise the ``while`` statement is executed as a normal Python ``while`` loop. Note that the ``while_loop`` API only accepts a conditional Tensor whose numel equals 1, so please use this kind of Tensor to write dygraph loop conditions; other Tensors will cause an error.
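+
+For example (an illustrative sketch, not from the original text), the following loop condition depends on a Tensor and is converted to ``while_loop``:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def accumulate(limit):
+        i = paddle.zeros([1], dtype='int64')
+        total = paddle.zeros([1], dtype='int64')
+        # The loop condition is a Tensor with numel 1, so the loop is
+        # converted to the static graph `while_loop` API.
+        while i < limit:
+            total += i
+            i += 1
+        return total
+
+    print(accumulate(paddle.to_tensor([10], dtype='int64')))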
+
+3. ``for`` loop
+
+3.1 ``for _ in range(__)`` loop
+
+ProgramTranslator first transforms it into an equivalent Python while loop, then converts it from dygraph to static graph by the same logic as the ``while`` loop above.
+
+3.2 ``for _ in x`` loop
+
+If ``x`` is a Python container, iterator, or generator, the loop is executed as an original Python statement. If ``x`` is a Tensor, ProgramTranslator transforms the loop into a PaddlePaddle static graph loop and fetches ``x[0], x[1], ...`` as the loop variable in each iteration.
+
+3.3 ``for idx, val in enumerate(x)`` loop
+
+If ``x`` is a Python container, iterator, or generator, the loop is executed as an original Python statement. If ``x`` is a Tensor, ProgramTranslator transforms the loop into a PaddlePaddle static graph loop. In each iteration, ``idx`` is transformed to a 1-D Tensor with value ``0, 1, ...`` and ``val`` is transformed to ``x[0], x[1], ...`` .
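+
+A short sketch (illustrative names) of iterating over a Tensor:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def sum_rows(x):
+        total = paddle.zeros_like(x[0])
+        # x is a Tensor, so the loop fetches x[0], x[1], ... in turn and is
+        # converted to a static graph loop.
+        for row in x:
+            total = total + row
+        return total
+
+    print(sum_rows(paddle.ones([3, 4])))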
+
+4. ``break, continue``
+
+ProgramTranslator supports ``break, continue`` statements in loops. ProgramTranslator adds some PaddlePaddle static graph ``cond`` statements to skip the execution of the corresponding part when the ``break, continue`` condition is met.
+
+5. ``return``
+
+ProgramTranslator supports ``return`` inside a conditional block or a loop body; the ``return`` does not have to be at the end of the function. It also supports returning tuples of various lengths containing Tensors of different dtypes. The implementation adds some PaddlePaddle static graph ``cond`` statements to skip the parts of code after ``return`` is triggered.
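+
+A sketch (illustrative names) of an early ``return`` inside a Tensor-dependent branch:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def clip_or_scale(x):
+        # The early return sits inside a Tensor-dependent branch; it is
+        # rewritten with `cond` so the remaining code is skipped when it triggers.
+        if paddle.max(x) > 1:
+            return paddle.clip(x, -1.0, 1.0)
+        return x * 2
+
+    print(clip_or_scale(paddle.to_tensor([0.2, 3.0])))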
+
+
+Some Python basic operators
+---------------------------
+
+1. ``+, -, *, /, **, >, <, >= , <=, ==`` etc.
+
+Because the PaddlePaddle static graph overloads these basic Python arithmetic and comparison operators, ProgramTranslator supports such operations.
+
+2. ``and, or, not`` logical operators
+
+Python has the ``and, or, not`` keywords as basic logical operators. ProgramTranslator checks whether the operands of a logical operation are Tensors; if they are, ProgramTranslator replaces the ``and, or, not`` expression with the corresponding PaddlePaddle static graph logical API and runs it.
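+
+For instance (an illustrative sketch), a Tensor-valued ``and`` is replaced by ``paddle.logical_and``:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def in_unit_interval(x):
+        # Both operands are boolean Tensors, so `and` is replaced by the
+        # corresponding static graph logical API (paddle.logical_and).
+        if (x > 0) and (x < 1):
+            return paddle.ones_like(x)
+        return paddle.zeros_like(x)
+
+    print(in_unit_interval(paddle.to_tensor([0.5])))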
+
+3. Type casting
+
+In dygraph mode, users can use Python type casting syntax. For instance, if ``x`` is a Tensor, ``float(x)`` casts the data type of ``x`` to float. ProgramTranslator checks whether ``x`` is a Tensor at run time; if it is, the casting statement is rewritten to the PaddlePaddle static graph ``cast`` API so that the dtype can still be changed after the dygraph to static graph transformation.
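+
+A minimal sketch (illustrative names) of a cast that is rewritten to the static graph ``cast`` API:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def to_float(x):
+        # float(x) on a Tensor is rewritten to the static graph `cast` API,
+        # so the dtype conversion still happens after transformation.
+        return float(x)
+
+    print(to_float(paddle.to_tensor([3], dtype='int64')))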
+
+Python functions
+------------------------------
+
+1. ``print``
+
+In dygraph mode, ``print(x)`` prints the Tensor value if ``x`` is a Tensor. ProgramTranslator converts the built-in ``print`` into the PaddlePaddle static graph ``Print`` API during the dygraph to static graph transformation if the argument is a Tensor; otherwise ProgramTranslator does not convert the ``print`` statement.
+
+2. ``len``
+
+If ``x`` is a Tensor, ``len(x)`` returns the length of the 0th dimension of ``x`` in dygraph mode. ProgramTranslator turns it into the PaddlePaddle static graph ``shape`` API and returns the 0th element of the ``shape``. If ``x`` is a TensorArray, ``len(x)`` is transformed into the static graph API ``control_flow.array_length``, which returns the length of the TensorArray. In other cases, ``len`` is executed as the Python built-in ``len``.
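+
+A sketch (illustrative names) of ``len`` on a Tensor inside a converted function:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def mean_over_batch(x):
+        # len(x) is rewritten to use static graph shape information and gives
+        # the size of the 0th dimension of x.
+        n = len(x)
+        return paddle.sum(x) / n
+
+    print(mean_over_batch(paddle.ones([8, 3])))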
+
+3. lambda expression
+
+ProgramTranslator supports statements containing Python lambda expressions and rewrites them so that they return the expected results.
+
+
+4. Calling function
+
+If the decorated function calls another function, ProgramTranslator also transforms the called function recursively. The benefit is that users only need to add one decorator on the outermost function; there is no need to decorate every function. Note that ProgramTranslator doesn't support a function calling itself recursively; the details are in the unsupported grammars section below.
+
+
+Errors and Exceptions
+---------------------
+
+1. ``assert``
+
+If ``x`` is a Tensor, the ``assert x`` statement can assert that ``x`` is ``True`` or a non-zero value in dygraph mode. ProgramTranslator converts the statement into the PaddlePaddle static graph ``Assert`` API to support this grammar.
+
+
+Python containers
+-----------------
+
+1. ``list``: if all elements in a list are Tensors, ProgramTranslator converts it to a TensorArray. The PaddlePaddle static graph TensorArray supports append, pop, and in-place modification; other list operations such as sort are not supported. When not all elements in a list are Tensors, ProgramTranslator treats it as a normal Python list (see the sketch after this list).
+
+2. ``dict``: ProgramTranslator will add the Tensors in a dict into PaddlePaddle static graph ``Program``, so ``dict`` is supported by ProgramTranslator.
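+
+A sketch (illustrative names) of the list-of-Tensors case mentioned above:
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def gather_outputs(x):
+        outs = []           # every element is a Tensor, so the list can be
+        outs.append(x)      # converted to a TensorArray; append and pop
+        outs.append(x * 2)  # are supported on the converted list
+        return outs.pop()
+
+    print(gather_outputs(paddle.to_tensor([1.0, 2.0])))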
+
+Unsupported grammars
+--------------------
+
+1. Use the shape of output tensor of ``reshape``
+
+For example, ``x = reshape(x, shape=shape_tensor)`` followed by using ``x.shape[0]`` in another operation. Due to the difference between dygraph and static graph, this works in dygraph but fails in static graph. The reason is that APIs return computation results in dygraph mode, so ``x.shape`` has a deterministic value after calling ``reshape`` . However, the static graph does not know the value of ``shape_tensor`` while building the network, so it does not know the value of ``x.shape`` after calling ``reshape``. In this case the PaddlePaddle static graph sets each unknown dimension of ``x.shape`` to -1, which is not the expected value.
+
+We suggest using fixed shape values as much as possible and reducing reshape operations.
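+
+A sketch of the pattern to avoid (illustrative names; as explained above, this runs in dygraph but may fail or give unexpected shapes after transformation):
+
+.. code-block:: python
+
+    import paddle
+
+    @paddle.jit.to_static
+    def risky(x, shape_tensor):
+        x = paddle.reshape(x, shape=shape_tensor)
+        # In static graph mode x.shape may contain -1 here, so feeding it to
+        # another API can fail or produce an unexpected result.
+        return paddle.ones([x.shape[0]])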
+
+2. List of list of Tensor
+
+For example: ``l = [[tensor1, tensor2], [tensor3, tensor4]]``. Because ProgramTranslator transforms a list whose elements are all Tensors into a PaddlePaddle static graph TensorArray, and TensorArray does not support multiple dimensions, ProgramTranslator cannot handle this case.
+
+We suggest using a 1-D list in most cases, or using the PaddlePaddle APIs ``create_array, array_read, array_write`` to control the TensorArray explicitly.
+
+3. Convert Tensor to numpy array and do operation
+
+For example, the user calls ``numpy.array(tensor)`` in the decorated function to convert a Tensor into a numpy array and then computes on it with numpy APIs. In dygraph mode this is okay because the Tensor has a value; but in static graph mode a Tensor is only a variable used for building the network and has no value outside of static graph run time, so numpy computation on it is impossible.
+
+We suggest using PaddlePaddle APIs instead of numpy APIs in this case.
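+
+A sketch contrasting the two styles (illustrative names):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+
+    @paddle.jit.to_static
+    def bad_mean(x):
+        # Not supported: x has no value while the static graph is being
+        # built, so numpy cannot compute on it.
+        return np.mean(np.array(x))
+
+    @paddle.jit.to_static
+    def good_mean(x):
+        # Supported: use the PaddlePaddle API instead of numpy.
+        return paddle.mean(x)
+
+    print(good_mean(paddle.to_tensor([1.0, 2.0, 3.0])))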
+
+4. A function calls itself recursively
+
+ProgramTranslator doesn't support a function calling itself recursively. The reason is that a recursive function usually uses an ``if-else`` condition to stop the recursion, and this stop condition is transformed into a ``cond`` in static graph mode. Since ``cond`` only builds the network, it cannot determine at network building time how many times to recurse, so the function keeps calling itself and building the network until stack overflow. For this reason, ProgramTranslator cannot support a function calling itself recursively for now.
+
+We suggest rewriting the function in a non-recursive way in this case.
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8dbc8d7bcf15df9b3ae3c571c76df82db223b436
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/index_cn.rst
@@ -0,0 +1,20 @@
+###############
+动态图转静态图
+###############
+
+- `动态图转静态图 `_ :介绍了动态图转静态图的基本使用方法和架构原理
+
+- `支持语法列表 `_ :介绍了动态图转静态图支持的语法以及罗列不支持的语法写法
+
+- `报错信息处理 `_ :介绍了动态图转静态图的报错信息处理方法
+
+- `调试方法 `_ :介绍了动态图转静态图支持的调试方法
+
+
+.. toctree::
+ :hidden:
+
+ grammar_list_cn.rst
+ program_translator_cn.rst
+ error_handling_cn.md
+ debugging_cn.md
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6ff26175e9483c0b834bac65308af8eedec71794
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/index_en.rst
@@ -0,0 +1,20 @@
+#######################
+Dygraph to Static Graph
+#######################
+
+- `Dygraph to Static Graph `_ :Introduce the basic usage for transforming dygraph code into static code and the architecture of ProgramTranslator.
+
+- `Supported Grammars `_ :Introduce the grammars supported by ProgramTranslator and list the unsupported grammars.
+
+- `Error Handling `_ :Introduce the error handling by ProgramTranslator.
+
+- `Debugging Methods `_ :Introduce the debugging methods when using ProgramTranslator.
+
+.. toctree::
+ :hidden:
+
+ grammar_list_en.rst
+ program_translator_en.rst
+ error_handling_en.md
+ debugging_en.md
+
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..03852fc58a6bbe9c515de9138c40d38e837d25a8
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_cn.rst
@@ -0,0 +1,182 @@
+动态图转静态图
+================
+
+动态图有诸多优点,包括易用的接口,python风格的编程体验,友好的debug交互机制等。在动态图模式下,代码是按照我们编写的顺序依次执行。这种机制更符合Python程序员的习惯,可以很方便地将大脑中的想法快速地转化为实际代码,也更容易调试。但在性能方面,Python执行开销较大,与C++有一定差距。因此在工业界的许多部署场景中(如大型推荐系统、移动端)都倾向于直接使用C++来提速。
+
+相比动态图,静态图在部署方面更具有性能的优势。静态图程序在编译执行时,先搭建模型的神经网络结构,然后再对神经网络执行计算操作。预先搭建好的神经网络可以脱离Python依赖,在C++端被重新解析执行,而且拥有整体网络结构也能进行一些网络结构的优化。
+
+动态图代码更易编写和debug,但在部署性能上,静态图更具优势。因此我们新增了动态图转静态图的功能,支持用户依然使用动态图编写组网代码。PaddlePaddle会对用户代码进行分析,自动转换为静态图网络结构,兼顾了动态图易用性和静态图部署性能两方面优势。
+
+基本使用方法
+--------------
+
+PaddlePaddle提供了两种动态图转静态图的方式,基于动态图trace的TracedLayer与基于源代码级别转换的ProgramTranslator。
+
+1. 基于trace的TracedLayer:
+
+trace是指在模型运行时记录下其运行过哪些算子。TracedLayer就是基于这种技术,在一次执行动态图的过程中,记录所有运行的算子,并构建和保存静态图模型。一个使用例子如下:
+
+我们先定义一个简单的Fully Connected网络:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ class SimpleFcLayer(paddle.nn.Layer):
+ def __init__(self, feature_size, batch_size, fc_size):
+ super(SimpleFcLayer, self).__init__()
+ self._linear = paddle.nn.Linear(feature_size, fc_size)
+ self._offset = paddle.to_tensor(
+ np.random.random((batch_size, fc_size)).astype('float32'))
+
+ def forward(self, x):
+ fc = self._linear(x)
+ return fc + self._offset
+
+
+接下来是TracedLayer如何存储模型:
+
+.. code-block:: python
+
+ import paddle
+ from paddle.jit import TracedLayer
+
+ paddle.disable_static()
+
+ fc_layer = SimpleFcLayer(3, 4, 2)
+ in_np = np.random.random([3, 4]).astype('float32')
+ # 将numpy的ndarray类型的数据转换为Tensor类型
+ input_var = paddle.to_tensor(in_np)
+ # 通过 TracerLayer.trace 接口将命令式模型转换为声明式模型
+ out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var])
+ save_dirname = './saved_infer_model'
+ # 将转换后的模型保存
+ static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
+
+
+载入的模型可以使用静态图方式运行
+
+.. code-block:: python
+
+ place = paddle.CPUPlace()
+ exe = paddle.Executor(place)
+ program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe)
+ fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
+
+
+但是也正如前面所阐述的原理,trace只是记录了一次执行中涉及的算子,若用户的模型代码中包含依赖数据条件(包括输入的值或者shape)的控制流分支,即根据数据条件触发运行不同的算子,则TracedLayer无法正常工作。比如下面的例子:
+
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ def func(input_var):
+ # if判断与输入input_var的shape有关
+ if input_var.shape[0] > 1:
+ return paddle.cast(input_var, "float64")
+ else:
+ return paddle.cast(input_var, "int64")
+
+ paddle.disable_static()
+ in_np = np.array([-2]).astype('int')
+ input_var = paddle.to_tensor(in_np)
+ out = func(input_var)
+
+
+上例中如果使用TracedLayer.trace(func, inputs=[input_var]),由于trace只能记录if-else中实际执行到的那一个分支的算子,模型就无法按用户期望的那样根据input_var的形状保存if-else控制流。类似的控制流还有while/for循环的情况。
+
+2. 基于源代码转写的ProgramTranslator
+
+对于依赖数据的控制流,我们使用基于源代码转写的ProgramTranslator来进行动态图转静态图。其基本原理是通过分析Python代码来将动态图代码转写为静态图代码,并在底层自动帮用户使用执行器运行。其基本使用方法十分简便,只需要在要转化的函数(该函数也可以是用户自定义动态图Layer的forward函数)前添加一个装饰器 ``@paddle.jit.to_static`` ,上面的例子转化如下,并且可以依旧使用该函数运行得到结果:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ @paddle.jit.to_static
+ def func(input_var):
+ # if判断与输入input_var的shape有关
+ if input_var.shape[0] > 1:
+ out = paddle.cast(input_var, "float64")
+ else:
+ out = paddle.cast(input_var, "int64")
+ return out
+
+ paddle.disable_static()
+ in_np = np.array([-2]).astype('int')
+ input_var = paddle.to_tensor(in_np)
+ func(input_var)
+
+
+若要存储转化后的静态图模型,可以调用 ``paddle.jit.save`` ,我们再以SimpleFcLayer为例,需要在SimpleFcLayer的forward函数添加装饰器:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ class SimpleFcLayer(paddle.nn.Layer):
+ def __init__(self, feature_size, batch_size, fc_size):
+ super(SimpleFcLayer, self).__init__()
+ self._linear = paddle.nn.Linear(feature_size, fc_size)
+ self._offset = paddle.to_tensor(
+ np.random.random((batch_size, fc_size)).astype('float32'))
+
+ @paddle.jit.to_static
+ def forward(self, x):
+ fc = self._linear(x)
+ return fc + self._offset
+
+
+存储该模型可以使用paddle.jit.save接口:
+
+.. code-block:: python
+
+ import paddle
+
+ paddle.disable_static()
+
+ fc_layer = SimpleFcLayer(3, 4, 2)
+ in_np = np.random.random([3, 4]).astype('float32')
+ input_var = paddle.to_tensor(in_np)
+ out = fc_layer(input_var)
+
+ paddle.jit.save(fc_layer, "./fc_layer_dy2stat", input_spec=[input_var])
+
+内部架构原理
+--------------
+
+TracedLayer的原理就是trace,相对简单,因此我们在这里不展开描述。本节将主要阐述ProgramTranslator基于源代码将动态图代码转化为静态图代码。
+
+
+转化过程发生在用户开始调用被装饰的函数,转换过程在装饰器中实现。我们将内部涉及的过程分为以下几步:
+
+1. 函数与缓存
+
+动态图转静态图的主体是函数(Function)。对于函数内包含的PaddlePaddle接口,如果是仅计算相关算子代码语句,那么因为PaddlePaddle动态图和静态图接口一致,我们不需要额外转换这些代码为静态图代码。但是对于动态图,此类代码接口是直接运行计算和返回结果,而对于静态图此类代码接口其实是组网。那么如果被转化的函数被调用多次,动态图转静态图后会多次组网添加对应算子,这显然会导致问题。为了解决这个问题以及为了加速动转静转化过程,我们维护了被装饰器装饰的函数(Function)与其输入形状(shape),数据类型(dtype)映射到被转化后组网的Program的缓存(Cache)。当要被转化的函数命中缓存,我们直接用对应存储的Program运行静态图得到结果,否则我们才进行语句转化,并且转化成功后的Program存储进缓存。
+
+2. 动态图源码转AST(抽象语法树)
+
+动态图转静态图的最核心部分类似一个编译器:解析动态图代码语句为AST,再对AST进行改写,最后反转回静态图代码。从函数获取代码字符串可以使用Python的inspect.getsource。Python提供了自带的 `ast `_ 库来将代码字符串解析为AST,但是由于Python2、Python3的语法略有不同,为了避免额外处理这些差异,我们使用了统一Python2、Python3的开源AST处理库 `gast库 `_ 。借助这些接口,把函数转化为AST没有本质上的困难。
+
+3. AST改写和静态图源码转换
+
+这部分为动转静最核心的部分,我们对支持的各种语法进行ast转写。其中最重要的Python控制流,if-else,while,for循环被分别分析转化为PaddlePaddle静态图接口cond,while_loop等接口实现。我们对想转化的每一种主要语法创建一个Transformer(这里的Transformer是Python ast转写的概念,而不是自然语言处理NLP领域的Transformer),每个Transformer扫一遍AST并进行对应的改写。最后被转化完成的AST我们使用gast提供的接口转回成源码。
+
+4. 静态图源码作为动态图一部分运行的技术
+
+为了使动静转化更加易用,并让被转化的代码能在动态图中复用,我们在拥有源码后运行生成Program,并将这个Program包装成动态图的一个大op。这样一方面能把用户的代码转为静态图以提速或保存部署,另一方面用户也可以在Python层把生成的静态图代码作为动态图的一部分,继续训练或进行别的动态图运算。
+
+5. 易用性与Debug功能在动转静过程的实现
+
+AST转写的过程类似编译器,而一般编译器都会提供debug断点、报错、输出中间代码等功能。在进行动转静时,如果用户的动态图代码出错,或者用户想断点调试,或者想查看被转化后的静态图代码是否符合预期,我们也希望能够像编译器一样提供这些易用性功能,使得动转静在兼顾性能和部署的同时还具有易用性。我们这里将列出这些功能的实现方式。
+
+A. 报错对应到动态图代码行。由于被转化后的静态图代码和原动态图代码不同,Python运行出错时会报静态图的错误,因此我们在每一次AST转写时添加AST节点对应的原动态图代码行等信息,在Python报错栈中将静态图的报错转化成对应的动态图源码报错
+
+B. 设置断点功能。我们保留了被转化后代码的中的pdb.set_trace(), 用户可以使用这种方式进行断点调试
+
+C. 查看最后转化的静态图代码。我们输出为一个StaticLayer class,这个StaticLayer可以直接被调用,但是也存储转化后的代码,可以调用StaticLayer.code来获得转化后的代码。
+
+D. 输出中间转化状态代码,甚至不同语法Transformer转化的代码,比如经过for循环转化后代码是什么样的。我们开放接口设定了log level来让用户可以打印中间状态转化的代码。
+
+
diff --git a/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..573ddbb79893acb9386c87bec373035a439037b4
--- /dev/null
+++ b/doc/fluid/advanced_guide/dygraph_to_static/program_translator_en.rst
@@ -0,0 +1,178 @@
+Dygraph to Static Graph
+=======================
+
+The imperative-style coding of PaddlePaddle has the advantages of flexibility, Pythonic coding, and an easy-to-debug interface. In dygraph mode, code immediately executes kernels and gets numerical results, which allows users to enjoy a traditional Pythonic code order. Therefore it is efficient to turn an idea into real code and it is simple to debug. However, Python code is usually slower than C++, so many industrial systems (such as large recommender systems and mobile devices) prefer to deploy with a C++ implementation.
+
+Static graph mode is better at speed and portability. A static graph builds the network structure at compile time and then does the computation. The built network intermediate representation can be executed in C++ and gets rid of the Python dependency.
+
+Since dygraph has usability and debugging benefits while static graph yields performance and deployment advantages, we added functionality to convert dygraph to static graph. Users write dygraph code in imperative mode, and PaddlePaddle analyzes the Python syntax and turns it into the network structure of static graph mode. Our approach retains both the usability of dygraph and the portability of static graph.
+
+Basic Usage
+--------------
+
+PaddlePaddle has two ways to transform dygraph to static graph. TracedLayer extracts computation graph through tracing and ProgramTranslator gets computation graph through source code transformation.
+
+
+1. TracedLayer:
+
+Tracing means recording the operators that are run while executing a model. TracedLayer is based on this technique. It runs the dygraph program once, records all operators, then constructs the static graph model and saves it. Now take a glance at a usage example:
+
+Define a simple fully connected network:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ class SimpleFcLayer(paddle.nn.Layer):
+ def __init__(self, feature_size, batch_size, fc_size):
+ super(SimpleFcLayer, self).__init__()
+ self._linear = paddle.nn.Linear(feature_size, fc_size)
+ self._offset = paddle.to_tensor(
+ np.random.random((batch_size, fc_size)).astype('float32'))
+
+ def forward(self, x):
+ fc = self._linear(x)
+ return fc + self._offset
+
+Save model by TracedLayer:
+
+.. code-block:: python
+
+ import paddle
+ from paddle.jit import TracedLayer
+
+ paddle.disable_static()
+
+ fc_layer = SimpleFcLayer(3, 4, 2)
+ in_np = np.random.random([3, 4]).astype('float32')
+ # Turn numpy ndarray into Tensor
+ input_var = paddle.to_tensor(in_np)
+ # Transforming imperative mode into declarative mode by TracerLayer.trace
+ out_dygraph, static_layer = TracedLayer.trace(fc_layer, inputs=[input_var])
+ save_dirname = './saved_infer_model'
+ # Save the transformed model
+ static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
+
+Load model and run it in static graph mode:
+
+.. code-block:: python
+
+ place = paddle.CPUPlace()
+ exe = paddle.Executor(place)
+ program, feed_vars, fetch_vars = paddle.io.load_inference_model(save_dirname, exe)
+ fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
+
+However, as tracing only records operators once, if the user's code contains Tensor-dependent control flow (depending on the Tensor value or shape), that is, the Tensor can cause different operators to be executed, then TracedLayer cannot handle this case. For instance:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ def func(input_var):
+ # if condition depends on the shape of input_var
+ if input_var.shape[0] > 1:
+ return paddle.cast(input_var, "float64")
+ else:
+ return paddle.cast(input_var, "int64")
+
+ paddle.disable_static()
+ in_np = np.array([-2]).astype('int')
+ input_var = paddle.to_tensor(in_np)
+ out = func(input_var)
+
+If we apply TracedLayer.trace(func, inputs=[input_var]) to the above example, tracing can only record the operators in one branch of the if-else, so the model cannot be saved as the user originally intended. A similar situation applies to while/for loops.
+
+2. ProgramTranslator
+
+For Tensor-dependent control flow, we use the source-code-translation based ProgramTranslator to convert dygraph into static graph. The basic idea is analyzing the Python source code, turning it into static graph code, and then running the static graph code with an Executor. The basic usage of ProgramTranslator is simple: put the decorator ``@paddle.jit.to_static`` before the definition of the function to transform (the function can also be a method of a class, e.g., the ``forward`` function of a user-defined imperative Layer). The above Tensor-dependent example can be transformed correctly by ProgramTranslator as below:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ @paddle.jit.to_static
+ def func(input_var):
+ # if condition depends on the shape of input_var
+ if input_var.shape[0] > 1:
+ out = paddle.cast(input_var, "float64")
+ else:
+ out = paddle.cast(input_var, "int64")
+ return out
+
+ paddle.disable_static()
+ in_np = np.array([-2]).astype('int')
+ input_var = paddle.to_tensor(in_np)
+ func(input_var)
+
+To save the transformed model, we can call ``paddle.jit.save`` . Let's take ``SimpleFcLayer`` as an example again; we put the decorator on the ``forward`` method of ``SimpleFcLayer`` :
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+
+ class SimpleFcLayer(paddle.nn.Layer):
+ def __init__(self, feature_size, batch_size, fc_size):
+ super(SimpleFcLayer, self).__init__()
+ self._linear = paddle.nn.Linear(feature_size, fc_size)
+ self._offset = paddle.to_tensor(
+ np.random.random((batch_size, fc_size)).astype('float32'))
+
+ @paddle.jit.to_static
+ def forward(self, x):
+ fc = self._linear(x)
+ return fc + self._offset
+
+
+Calling ``paddle.jit.save`` to save above model:
+
+.. code-block:: python
+
+ import paddle
+
+ paddle.disable_static()
+
+ fc_layer = SimpleFcLayer(3, 4, 2)
+ in_np = np.random.random([3, 4]).astype('float32')
+ input_var = paddle.to_tensor(in_np)
+ out = fc_layer(input_var)
+
+ paddle.jit.save(fc_layer, "./fc_layer_dy2stat")
+
+
+Architecture
+--------------
+
+The basic idea of TracedLayer is tracing, which is relatively simple, so we won't expand on it here. This section will focus on the source code transformation done by ProgramTranslator.
+
+The transformation is implemented in the decorator, so it happens when the user calls the decorated function. The procedure includes these steps:
+
+1. Function and cache.
+
+The entity being transformed from dygraph to static graph is the decorated function. For the PaddlePaddle APIs in the function, since they are the same code in dygraph mode and static graph mode, we don't have to transform that code. However, those APIs do computation in dygraph mode while they build the network in static graph mode, so if the transformed function is called multiple times, those APIs would build the network multiple times in static graph mode, which can cause problems. To solve this, as well as to speed up the transformation, we maintain a cache that maps the function, input shapes, and input data types to the Program built by the transformed function. If the function hits the cache, we run the stored Program in static graph mode to get the result; otherwise we do the code transformation on the function and store the transformed Program into the cache.
+
+2. From dygraph source code to AST (Abstract Syntax Tree)
+
+The core of transforming dygraph to static graph is similar to a compiler: we parse the dygraph code into an AST, modify the AST, then turn it back into static graph code. We use Python ``inspect.getsource`` to get the source code string of the function. Python provides the ``ast`` library to parse a code string into an AST, but Python 2 and Python 3 have slight grammar differences. To avoid handling the different grammars ourselves, we use the open source AST library `gast `_ , which provides a compatible AST across Python versions. There is no essential difficulty in turning a function into an AST with these libraries.
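+
+A minimal sketch of this step using only the standard library (the real implementation uses ``gast``, but the idea is the same; the sample function is illustrative):
+
+.. code-block:: python
+
+    import ast
+    import inspect
+    import textwrap
+
+    def forward(x):
+        if x > 1:
+            return x * 2
+        return x
+
+    # Get the source of the function and parse it into an AST, which the
+    # transformers can then rewrite node by node.
+    source = textwrap.dedent(inspect.getsource(forward))
+    tree = ast.parse(source)
+    print(ast.dump(tree))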
+
+3. Transform AST and turn it to static graph code
+
+This is the key part of ProgramTranslator: we modify the AST for each supported grammar. The important Python control flows, such as ``if-elif-else, while, for`` loops, are converted to the PaddlePaddle static graph APIs ``cond, while_loop`` and so on. We create a Transformer (an AST-to-AST transformer in Python, not the Transformer in Natural Language Processing) for each grammar we want to transform. Every Transformer scans the AST and modifies it. Lastly, we turn the AST back into a source code string with the ``gast`` library.
+
+4. Running static graph code as part of dygraph
+
+In order to increase usability and to re-use the transformed static graph code in dygraph, we wrap the generated Program as a dygraph op; the op can run the forward and backward computation of the transformed Program. Then we can not only speed up dygraph code or save it for deployment, but also let users run part of their dygraph code in static graph mode and continue training or other dygraph computation on top of it.
+
+5. Error handling and Debug
+
+A compiler usually supports debugging functionality such as breakpoints, throwing exceptions, and printing intermediate code. ProgramTranslator is similar to a compiler, and users may want to set breakpoints for debugging or check whether the transformed static graph code matches their expectation. So we also implemented this error handling and debugging functionality. Here we list these features and their implementation.
+
+A. Report errors/exceptions on the dygraph code line. Because the transformed static graph code is different from the original dygraph code, when Python executes the static graph code, exceptions are reported at the static graph code. To locate the corresponding dygraph code, we attach information such as the original line number to the AST nodes when we transform the AST, so we can rewrite the static graph exception into the corresponding dygraph code exception.
+
+B. We support ``pdb.set_trace()`` when running ProgramTranslator; users can add this line to set breakpoints.
+
+C. Check the transformed static graph code. The transformed output is a Python class named ``StaticLayer``; this class can be called, and it also stores the transformed code string. Users can read ``StaticLayer.code`` to get the converted code.
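+
+For example (a sketch based on the description above, where ``func`` is the decorated function from the earlier example):
+
+.. code-block:: python
+
+    # The object produced by @paddle.jit.to_static stores the generated
+    # static graph code; it can be read through its `code` attribute.
+    print(func.code)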
+
+D. Print the intermediate transformed code, for example what the code looks like after the ``for`` loop transformation. We provide APIs to set the log level so that users can check the intermediate code.
+
+
diff --git a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md
index dc197730da5ba212066fc231885ce57081d0447e..df0149674045239242735bad0c778d74f9d17811 100644
--- a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md
+++ b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl.md
@@ -1,248 +1,300 @@
# VisualDL 工具简介
+
+
-
+
-## 介绍
-VisualDL是一个面向深度学习任务设计的可视化工具,包含了scalar、参数分布、模型结构、图像可视化等功能,项目正处于高速迭代中,新的组件会不断加入。
-目前大多数DNN平台均使用Python作为配置语言,VisualDL原生支持python的使用,
-通过在模型的Python配置中添加几行,便可以为训练过程提供丰富的可视化支持。
-除了Python SDK之外,VisualDL底层采用C++编写,其暴露的C++ SDK也可以集成到其他平台中,
-实现原生的性能和定制效果。
+VisualDL是飞桨可视化分析工具,以丰富的图表呈现训练参数变化趋势、模型结构、数据样本、直方图、PR曲线及高维数据分布。可帮助用户更清晰直观地理解深度学习模型训练过程及模型结构,进而实现高效的模型优化。
-## 组件
-VisualDL 目前支持以下组件:
+具体功能使用方式请参见**VisualDL使用指南**。项目正处于高速迭代中,敬请期待新组件的加入。
-- scalar
-- histogram
-- image
-- audio
-- graph
-- high dimensional
+VisualDL支持浏览器种类:Chrome(81和83)、Safari 13、FireFox(77和78)、Edge(Chromium版)。
-### Scalar
-可以用于展示训练测试的误差趋势
+VisualDL原生支持python的使用, 通过在模型的Python配置中添加几行代码,便可为训练过程提供丰富的可视化支持。
-
-
-
-### Histogram
-用于可视化任何tensor中元素分布的变化趋势
+## 目录
-
-
-
+* [核心亮点](#核心亮点)
+* [安装方式](#安装方式)
+* [使用方式](#使用方式)
+* [可视化功能概览](#可视化功能概览)
+* [开源贡献](#开源贡献)
+* [更多细节](#更多细节)
+* [技术交流](#技术交流)
-### Image
-可以用于可视化任何tensor,或模型生成的图片
-
-
-
-### Audio
-可用于播放输入或生成的音频样本
+## 核心亮点
-### Graph
+### 简单易用
-VisualDL的graph支持paddle program的展示,同时兼容 ONNX(Open Neural Network Exchange)[https://github.com/onnx/onnx],通过与 python SDK的结合,VisualDL可以兼容包括 PaddlePaddle, pytorch, mxnet在内的大部分主流DNN平台。
+API设计简洁易懂,使用简单。模型结构一键实现可视化。
-
-
-
+### 功能丰富
-要进行paddle模型的展示,需要进行以下两步操作:
+功能覆盖标量、数据样本、图结构、直方图、PR曲线及数据降维可视化。
-1. 在paddle代码中,调用`fluid.io.save_inference_model()`接口保存模型
-2. 在命令行界面,使用`visualdl --model_pb [paddle_model_dir]` 加载paddle模型
+### 高兼容性
+全面支持Paddle、ONNX、Caffe等市面主流模型结构可视化,广泛支持各类用户进行可视化分析。
-### High Dimensional
-用高维度数据映射在2D/3D来可视化嵌入
+### 全面支持
+
+与飞桨服务平台及工具组件全面打通,为您在飞桨生态系统中提供最佳使用体验。
-
-
-
-## 快速尝试
-请使用下面的命令,来快速测试 VisualDL。
+## 安装方式
+
+### 使用pip安装
+
+```shell
+pip install --upgrade --pre visualdl
```
-# 安装,建議是在虚拟环境或anaconda下。
-pip install --upgrade visualdl
-# 运行一个例子,vdl_create_scratch_log 将创建测试日志
-vdl_create_scratch_log
-visualdl --logdir=scratch_log --port=8080
+### 使用代码安装
-# 访问 http://127.0.0.1:8080
```
+git clone https://github.com/PaddlePaddle/VisualDL.git
+cd VisualDL
-如果出现`TypeError: __init__() got an unexpected keyword argument 'file'`, 是因为protobuf不是3.5以上,运行`pip install --upgrade protobuf`就能解决。
+python setup.py bdist_wheel
+pip install --upgrade dist/visualdl-*.whl
+```
-如果以上步骤还有出现其他问题,很可能是因为python或pip不同版本或不同位置所致,以下安装方法能解决。
+需要注意,Python官方自2020年1月1日起不再维护Python2,为了保障代码可用性,VisualDL现仅支持Python3。
-## 使用 virtualenv 安装
+## 使用方式
-[Virtualenv](https://virtualenv.pypa.io/en/stable/) 能创建独立Python环境,也能确保Python和pip的相对位置正确。
+VisualDL将训练过程中的数据、参数等信息储存至日志文件中后,启动面板即可查看可视化结果。
-在macOS上,安装pip和virtualenv如下:
-```
-sudo easy_install pip
-pip install --upgrade virtualenv
-```
+### 1. 记录日志
-在Linux上,安装pip和virtualenv如下:
-```
-sudo apt-get install python3-pip python3-dev python-virtualenv
+VisualDL的后端提供了Python SDK,可通过LogWriter定制一个日志记录器,接口如下:
+
+```python
+class LogWriter(logdir=None,
+ comment='',
+ max_queue=10,
+ flush_secs=120,
+ filename_suffix='',
+ write_to_disk=True,
+ **kwargs)
```
-然后创建一个虚拟环境:
+#### 接口参数
+
+| 参数 | 格式 | 含义 |
+| --------------- | ------- | ------------------------------------------------------------ |
+| logdir | string | 日志文件所在的路径,VisualDL将在此路径下建立日志文件并进行记录,如果不填则默认为`runs/${CURRENT_TIME}` |
+| comment | string | 为日志文件夹名添加后缀,如果制定了logdir则此项无效 |
+| max_queue | int | 日志记录消息队列的最大容量,达到此容量则立即写入到日志文件 |
+| flush_secs | int | 日志记录消息队列的最大缓存时间,达到此时间则立即写入到日志文件 |
+| filename_suffix | string | 为默认的日志文件名添加后缀 |
+| write_to_disk | boolean | 是否写入到磁盘 |
+
+#### 示例
+
+设置日志文件并记录标量数据:
+
+```python
+from visualdl import LogWriter
+
+# 在`./log/scalar_test/train`路径下建立日志文件
+with LogWriter(logdir="./log/scalar_test/train") as writer:
+ # 使用scalar组件记录一个标量数据
+ writer.add_scalar(tag="acc", step=1, value=0.5678)
+ writer.add_scalar(tag="acc", step=2, value=0.6878)
+ writer.add_scalar(tag="acc", step=3, value=0.9878)
```
-virtualenv ~/vdl # for Python2.7
-virtualenv -p python3 ~/vdl for Python 3.x
+
+### 2. 启动面板
+
+在上述示例中,日志已记录三组标量数据,现可启动VisualDL面板查看日志的可视化结果,共有两种启动方式:
+
+#### 在命令行启动
+
+使用命令行启动VisualDL面板,命令格式如下:
+
+```shell
+visualdl --logdir --host --port --cache-timeout --language --public-path --api-only
```
-```~/vdl``` 是你的Virtualenv目录, 你也可以选择任一目录。
+参数详情:
+
+| 参数 | 意义 |
+| --------------- | ------------------------------------------------------------ |
+| --logdir | 设定日志所在目录,可以指定多个目录,VisualDL将遍历并且迭代寻找指定目录的子目录,将所有实验结果进行可视化 |
+| --model | 设定模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化,目前可支持PaddlePaddle、ONNX、Keras、Core ML、Caffe等多种模型结构,详情可查看[graph支持模型种类]([https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--%E7%BD%91%E7%BB%9C%E7%BB%93%E6%9E%84%E7%BB%84%E4%BB%B6](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--网络结构组件)) |
+| --host | 设定IP,默认为`127.0.0.1` |
+| --port | 设定端口,默认为`8040` |
+| --cache-timeout | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 |
+| --language | VisualDL面板语言,可指定为'EN'或'ZH',默认为浏览器使用语言 |
+| --public-path | VisualDL面板URL路径,默认是'/app',即访问地址为'http://<host>:<port>/app' |
+| --api-only | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://<host>:<port>/<public_path>/api';若没有设置public_path参数,则默认为'http://<host>:<port>/api' |
+
+针对上一步生成的日志,启动命令为:
-激活虚拟环境如下:
```
-source ~/vdl/bin/activate
+visualdl --logdir ./log
```
-现在再安装 VisualDL 和运行范例:
+#### 在Python脚本中启动
+支持在Python脚本中启动VisualDL面板,接口如下:
+
+```python
+visualdl.server.app.run(logdir,
+ host="127.0.0.1",
+ port=8080,
+ cache_timeout=20,
+ language=None,
+ public_path=None,
+ api_only=False,
+ open_browser=False)
```
-pip install --upgrade visualdl
-# 运行一个例子,vdl_create_scratch_log 将创建测试日志
-vdl_create_scratch_log
-visualdl --logdir=scratch_log --port=8080
+请注意:除`logdir`外,其他参数均为不定参数,传递时请指明参数名。
+
+接口参数具体如下:
+
+| 参数 | 格式 | 含义 |
+| ------------- | ------------------------------------------------ | ------------------------------------------------------------ |
+| logdir | string或list[string_1, string_2, ... , string_n] | 日志文件所在的路径,VisualDL将在此路径下递归搜索日志文件并进行可视化,可指定单个或多个路径 |
+| model | string | 模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化 |
+| host | string | 指定启动服务的ip,默认为`127.0.0.1` |
+| port | int | 启动服务端口,默认为`8040` |
+| cache_timeout | int | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 |
+| language | string | VisualDL面板语言,可指定为'en'或'zh',默认为浏览器使用语言 |
+| public_path | string | VisualDL面板URL路径,默认是'/app',即访问地址为'http://<host>:<port>/app' |
+| api_only | boolean | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://<host>:<port>/<public_path>/api';若没有设置public_path参数,则默认为'http://<host>:<port>/api' |
+| open_browser | boolean | 是否打开浏览器,设置为True则在启动后自动打开浏览器并访问VisualDL面板,若设置api_only,则忽略此参数 |
+
+针对上一步生成的日志,我们的启动脚本为:
-# 访问 http://127.0.0.1:8080
+```python
+from visualdl.server import app
+
+app.run(logdir="./log")
```
-如果在虚拟环境下仍然遇到安装问题,请尝试以下方法。
+在使用任意一种方式启动VisualDL面板后,打开浏览器访问VisualDL面板,即可查看日志的可视化结果,如图:
+
+
+
-## 使用 Anaconda 安装
-Anaconda是一个用于科学计算的Python发行版,提供了包管理与环境管理的功能,可以很方便地解决多版本python并存、切换以及各种第三方包安装问题。
-请根据[Anaconda下载网站](https://www.anaconda.com/download) 的指示去下载和安装Anaconda.
-下载Python 3.6版本的command-Line installer.
+## 可视化功能概览
-创建conda环境名字为```vdl```或任何名字:
-```
-conda create -n vdl pip python=2.7 # or python=3.3, etc.
-```
+### Scalar
-激活conda环境如下:
-```
-source activate vdl
-```
+以图表形式实时展示训练过程参数,如loss、accuracy。让用户通过观察单组或多组训练参数变化,了解训练过程,加速模型调优。具有两大特点:
-现在再安装 VisualDL 和运行范例:
+#### 动态展示
-```
-pip install --upgrade visualdl
+在启动VisualDL后,LogReader将不断增量的读取日志中数据并供前端调用展示,因此能够在训练中同步观测指标变化,如下图:
-# 运行一个例子,vdl_create_scratch_log 将创建测试日志
-vdl_create_scratch_log
-visualdl --logdir=scratch_log --port=8080
+
+
+
-# 访问 http://127.0.0.1:8080
-```
-如果仍然遇到安装问题,请尝试以下用源代码安装方法。
-### 使用代码安装
-```
-#建議是在虚拟环境或anaconda下。
-git clone https://github.com/PaddlePaddle/VisualDL.git
-cd VisualDL
+#### 多实验对比
-python setup.py bdist_wheel
-pip install --upgrade dist/visualdl-*.whl
-```
+只需在启动VisualDL时将每个实验日志所在路径同时传入即可,每个实验中相同tag的指标将绘制在一张图中同步呈现,如下图:
-如果打包和安装遇到其他问题,不安装只想运行Visual DL可以看[这里](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/develop/how_to_dev_frontend_cn.md)
+
+
+
-## SDK
-VisualDL 同时提供了python SDK 和 C++ SDK 来实现不同方式的使用。
-### Python SDK
-VisualDL 现在支持 Python 2和 Python 3。
+### Image
-以最简单的Scalar组件为例,尝试创建一个scalar组件并插入多个时间步的数据:
+实时展示训练过程中的图像数据,用于观察不同训练阶段的图像变化,进而深入了解训练过程及效果。
-```python
-import random
-from visualdl import LogWriter
+
+
+
-logdir = "./tmp"
-logger = LogWriter(logdir, sync_cycle=10000)
-# mark the components with 'train' label.
-with logger.mode("train"):
- # create a scalar component called 'scalars/scalar0'
- scalar0 = logger.scalar("scalars/scalar0")
-# add some records during DL model running.
-for step in range(100):
- scalar0.add_record(step, random.random())
-```
+### Audio
-### C++ SDK
-上面 Python SDK 中代码完全一致的C++ SDK用法如下
-```c++
-#include
-#include
-#include "visualdl/sdk.h"
+实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。
-namespace vs = visualdl;
-namespace cp = visualdl::components;
+
+
+
-int main() {
- const std::string dir = "./tmp";
- vs::LogWriter logger(dir, 10000);
- logger.SetMode("train");
- auto tablet = logger.AddTablet("scalars/scalar0");
- cp::Scalar scalar0(tablet);
+### Graph
- for (int step = 0; step < 1000; step++) {
- float v = (float)std::rand() / RAND_MAX;
- scalar0.AddRecord(step, v);
- }
+一键可视化模型的网络结构。可查看模型属性、节点信息、节点输入输出等,并支持节点搜索,辅助用户快速分析模型结构与了解数据流向。
- return 0;
-}
-```
-## 启动Board
-当训练过程中已经产生了日志数据,就可以启动board进行实时预览可视化信息
+
+
+
-```
-visualdl --logdir
-```
-board 还支持一下参数来实现远程的访问:
-- `--host` 设定IP
-- `--port` 设定端口
-- `-m / --model_pb` 指定 ONNX 格式的模型文件
+### Histogram
+
+以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。
+
+- Offset模式
+
+
+
+
-### 贡献
-VisualDL 是由 [PaddlePaddle](http://www.paddlepaddle.org/) 和
-[ECharts](http://echarts.baidu.com/) 合作推出的开源项目。我们欢迎所有人使用,提意见以及贡献代码。
+
+- Overlay模式
+
+
+
+
+
+
+
+### PR Curve
+
+精度-召回率曲线,帮助开发者权衡模型精度和召回率之间的平衡,设定最佳阈值。
+
+
+
+
+
+
+### High Dimensional
+
+将高维数据进行降维展示,目前支持T-SNE、PCA两种降维方式,用于深入分析高维数据间的关系,方便用户根据数据特征进行算法优化。
+
+
+
+
+
+## 开源贡献
+
+VisualDL 是由 [PaddlePaddle](https://www.paddlepaddle.org/) 和 [ECharts](https://echarts.apache.org/) 合作推出的开源项目。
+Graph 相关功能由 [Netron](https://github.com/lutzroeder/netron) 提供技术支持。
+欢迎所有人使用,提意见以及贡献代码。
+
## 更多细节
-想了解更多关于VisualDL的使用介绍,请查看[文档](https://github.com/PaddlePaddle/VisualDL/tree/develop/demo)
+想了解更多关于VisualDL可视化功能的使用详情介绍,请查看**VisualDL使用指南**。
+
+## 技术交流
+
+欢迎您加入VisualDL官方QQ群:1045783368 与飞桨团队以及其他用户共同针对VisualDL进行讨论与交流。
diff --git a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md
index dc1778d82d3c4780630525ca33518f891a2f0b2f..e6a6445e3d4a89501f236bba6cf5623304ab3024 100644
--- a/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md
+++ b/doc/fluid/advanced_guide/evaluation_debugging/debug/visualdl_usage.md
@@ -1,622 +1,774 @@
# VisualDL 使用指南
-## 概述
+## 概述
+
VisualDL 是一个面向深度学习任务设计的可视化工具。VisualDL 利用了丰富的图表来展示数据,用户可以更直观、清晰地查看数据的特征与变化趋势,有助于分析数据、及时发现错误,进而改进神经网络模型的设计。
-目前,VisualDL 支持 scalar, histogram, image, text, audio, high dimensional, graph 这七个组件:
+目前,VisualDL 支持 scalar, image, audio, graph, histogram, pr curve, high dimensional 七个组件,项目正处于高速迭代中,敬请期待新组件的加入。
-|组件名称|展示图表|作用|
-|:----:|:---:|:---|
-|scalar |折线图|动态展示损失函数值、准确率等标量数据|
-|histogram |直方图|动态展示参数矩阵的数值分布与变化趋势,便于查看权重矩阵、偏置项、梯度等参数的变化|
-|image |图片|显示图片,可显示输入图片和处理后的结果,便于查看中间过程的变化|
-|text |文本|展示文本,有助于 NLP 等领域的用户进行数据分析和结果判断|
-|audio |音频|可直接播放音频,也支持下载,有助于语音识别等领域的用户进行数据分析和结果判断|
-|high dimensional |坐标|将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性|
-|graph |有向图|展示神经网络的模型结构|
+| 组件名称 | 展示图表 | 作用 |
+| :-------------------------------------------------: | :--------: | :----------------------------------------------------------- |
+| [ Scalar](#Scalar--标量组件) | 折线图 | 动态展示损失函数值、准确率等标量数据 |
+| [Image](#Image--图片可视化组件) | 图片可视化 | 显示图片,可显示输入图片和处理后的结果,便于查看中间过程的变化 |
+| [Audio](#Audio--音频播放组件) | 音频播放 | 播放训练过程中的音频数据,监控语音识别与合成等任务的训练过程 |
+| [Graph](#Graph--网络结构组件) | 网络结构 | 展示网络结构、节点属性及数据流向,辅助学习、优化网络结构 |
+| [Histogram](#Histogram--直方图组件) | 直方图 | 展示训练过程中权重、梯度等张量的分布 |
+| [PR Curve](#PR-Curve--PR曲线组件) | 折线图 | 权衡精度与召回率之间的平衡关系,便于选择最佳阈值 |
+| [High Dimensional](#High-Dimensional--数据降维组件) | 数据降维 | 将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性 |
-## 动态添加数据组件
+## Scalar -- 折线图组件
-要想使用 VisualDL 的 scalar, histogram, image, text, audio, high dimensional 这六个组件来添加数据,都必须先初始化记录器 `LogWriter`,以设置数据在本地磁盘的保存路径以及同步周期。此后各个组件的输入数据会先保存到本地磁盘,进而才能加载到前端网页中展示。
+### 介绍
-### LogWriter -- 记录器
+Scalar 组件的输入数据类型为标量,该组件的作用是将训练参数以折线图形式呈现。将损失函数值、准确率等标量数据作为参数传入 scalar 组件,即可画出折线图,便于观察变化趋势。
-LogWriter 是一个数据记录器,在数据记录过程中,LogWriter 会周期性地将数据写入指定路径。
+### 记录接口
-LogWriter 的定义为:
+Scalar 组件的记录接口如下:
```python
-class LogWriter(dir, sync_cycle)
+add_scalar(tag, value, step, walltime=None)
```
-> :param dir : 指定日志文件的保存路径。
-> :param sync_cycle : 同步周期。经过 sync_cycle 次添加数据的操作,就执行一次将数据从内存写入磁盘的操作。
-> :return: 函数返回一个 LogWriter 对象。
-
-例1 创建一个 LogWriter 对象
-
-```python
-# 创建一个 LogWriter 对象 log_writer
-log_writer = LogWriter("./log", sync_cycle=10)
-```
+接口参数说明如下:
-LogWriter类的成员函数包括:
+| 参数 | 格式 | 含义 |
+| -------- | ------ | ------------------------------------------- |
+| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` |
+| value | float | 要记录的数据值 |
+| step | int | 记录的步数 |
+| walltime | int | 记录数据的时间戳,默认为当前时间戳 |
-* `mode()`;
-* `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()`;
+### Demo
-成员函数 `mode()` 用于指定模式。模式的名称是自定义的,比如训练`train`,验证`validation`,测试`test`,第一层卷积`conv_layer1`。 有着相同模式名称的组件作为一个整体,用户可在前端网页中的 `Runs` 按钮中选择显示哪个模式的数据(默认是显示全部模式)。
+- 基础使用
-成员函数 `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()` 用于创建组件。
-
-例2 LogWriter 创建组件
+下面展示了使用 Scalar 组件记录数据的示例,代码文件请见[Scalar组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py)
```python
-# 设定模式为 train,创建一个 scalar 组件
-with log_writer.mode("train") as logger:
- train_scalar = logger.scalar("acc")
-# 设定模式为test,创建一个 image 组件
-with log_writer.mode("test") as shower:
- test_image = shower.image("conv_image", 10, 1)
+from visualdl import LogWriter
+
+if __name__ == '__main__':
+ value = [i/1000.0 for i in range(1000)]
+ # 初始化一个记录器
+ with LogWriter(logdir="./log/scalar_test/train") as writer:
+ for step in range(1000):
+ # 向记录器添加一个tag为`acc`的数据
+ writer.add_scalar(tag="acc", step=step, value=value[step])
+ # 向记录器添加一个tag为`loss`的数据
+ writer.add_scalar(tag="loss", step=step, value=1/(value[step] + 1))
```
-### scalar -- 折线图组件
+运行上述程序后,在命令行执行
-scalar 组件的输入数据类型为标量,该组件的作用是画折线图。将损失函数值、准确率等标量数据作为参数传入 scalar 组件,即可画出折线图,便于观察变化趋势。
+```shell
+visualdl --logdir ./log --port 8080
+```
-想通过 scalar 组件画折线图,只需先设定 LogWriter 对象的成员函数 `scalar()`,即可使用 `add_record()` 函数添加数据。这两个函数的具体用法如下:
+接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图。
-* LogWriter 对象的成员函数 `scalar()`:
+
+
+
-```python
-def scalar(tag, type)
-```
-> :param tag : 标签,tag 相同的折线在同一子框,否则不同,tag 的名称中不能有 % 这个字符。
-> :param type : 数据类型,可选“float”, "double", "int",默认值为 "float"。
-> :return: 函数返回一个 ScalarWriter 对象。
-* scalar 组件的成员函数 `add_record()`:
+- 多组实验对比
-```python
-def add_record(step, value)
-```
+下面展示了使用Scalar组件实现多组实验对比
-> :param step : 步进数,标记这是第几个添加的数据。
-> :param value : 输入数据。
+多组实验对比的实现分为两步:
-例3 scalar 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/scalar-demo.py)
+1. 创建子日志文件储存每组实验的参数数据
+2. 将数据写入scalar组件时,**使用相同的tag**,即可实现对比**不同实验**的**同一类型参数**
```python
-# coding=utf-8
from visualdl import LogWriter
-# 创建 LogWriter 对象
-log_writer = LogWriter("./log", sync_cycle=20)
+if __name__ == '__main__':
+ value = [i/1000.0 for i in range(1000)]
+ # 步骤一:创建父文件夹:log与子文件夹:scalar_test
+ with LogWriter(logdir="./log/scalar_test") as writer:
+ for step in range(1000):
+ # 步骤二:向记录器添加一个tag为`train/acc`的数据
+ writer.add_scalar(tag="train/acc", step=step, value=value[step])
+ # 步骤二:向记录器添加一个tag为`train/loss`的数据
+ writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1))
+ # 步骤一:创建第二个子文件夹scalar_test2
+ value = [i/500.0 for i in range(1000)]
+ with LogWriter(logdir="./log/scalar_test2") as writer:
+ for step in range(1000):
+ # 步骤二:在同样名为`train/acc`下添加scalar_test2的accuracy的数据
+ writer.add_scalar(tag="train/acc", step=step, value=value[step])
+ # 步骤二:在同样名为`train/loss`下添加scalar_test2的loss的数据
+ writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1))
+```
+
+运行上述程序后,在命令行执行
-# 创建 scalar 组件,模式为 train
-with log_writer.mode("train") as logger:
- train_acc = logger.scalar("acc")
- train_loss = logger.scalar("loss")
+```shell
+visualdl --logdir ./log --port 8080
+```
-# 创建 scalar 组件,模式设为 test, tag 设为 acc
-with log_writer.mode("test") as logger:
- test_acc = logger.scalar("acc")
+接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图,对比「scalar_test」和「scalar_test2」的Accuracy和Loss。
-value = [i/1000.0 for i in range(1000)]
-for step in range(1000):
- # 向名称为 acc 的图中添加模式为train的数据
- train_acc.add_record(step, value[step])
+
+
+
- # 向名称为 loss 的图中添加模式为train的数据
- train_loss.add_record(step, 1 / (value[step] + 1))
- # 向名称为 acc 的图中添加模式为test的数据
- test_acc.add_record(step, 1 - value[step])
-```
+*多组实验对比的应用案例可参考AI Studio项目:[VisualDL 2.0--眼疾识别训练可视化](https://aistudio.baidu.com/aistudio/projectdetail/502834)
-运行上述程序后,在命令行中执行
-```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
-```
+### 功能操作说明
-接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),即可查看以下折线图。
+* 支持数据卡片「最大化」、「还原」、「坐标系转化」(y轴对数坐标)、「下载」折线图
-
-图1. scalar 组件展示折线图
+
-VisualDL 页面的右边侧栏有各个组件的调节选项,以 scalar 组件为例:
-* Smoothing : 用于调节曲线的平滑度。
-* X-axis : 折线图的横坐标参数,可选 `Step`, `Relative`, `Wall Time`,分别表示横轴设为步进数、相对值、数据采集的时间。
-* Tooltip sorting : 标签排序方法,可选 `default`, `descending`, `ascending`, `nearest`,分别表示默认排序、按名称降序、按名称升序、按最新更新时间排序。
-VisualDL 页面的右边侧栏的最下方还有一个 `RUNNING` 按钮,此时前端定期从后端同步数据,刷新页面。点击可切换为红色的 `STOPPED`,暂停前端的数据更新。
-### histogram -- 直方图组件
-histogram 组件的作用是以直方图的形式显示输入数据的分布。在训练过程中,把一些参数(例如权重矩阵 w,偏置项 b,梯度)传给 histogram 组件,就可以查看参数分布在训练过程中的变化趋势。
+* 数据点Hover展示详细信息
-想通过 histogram 组件画参数直方图,只需先设定 LogWriter 对象的成员函数 `histogram()`,即可使用 `add_record()` 函数添加数据。这两个函数的具体用法如下:
+
+
+
-* LogWriter 对象的成员函数 `histogram()`:
-```python
-def histogram(tag, num_buckets, type)
-```
-> :param tag : 标签,结合 LogWriter 指定的模式,决定输入参数显示的子框。
-> :param num_buckets : 直方图的柱子数量。
-> :param type : 数据类型,可选“float”, "double", "int",默认值为 "float"。
-> :return: 函数返回一个 HistogramWriter 对象。
-* histogram 组件的成员函数 `add_record()`:
-```python
-def add_record(step, data)
-```
+* 可搜索卡片标签,展示目标图像
-> :param step : 步进数,标记这是第几组添加的数据。
-> :param data : 输入参数, 数据类型为 list[]。
+
+
+
-例4 histogram 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/histogram-demo.py)
-```python
-# coding=utf-8
-import numpy as np
-from visualdl import LogWriter
-# 创建 LogWriter 对象
-log_writer = LogWriter('./log', sync_cycle=10)
-# 创建 histogram 组件,模式为train
-with log_writer.mode("train") as logger:
- param1_histogram = logger.histogram("param1", num_buckets=100)
-# 设定步数为 1 - 100
-for step in range(1, 101):
- # 添加的数据为随机分布,所在区间值变小
- interval_start = 1 + 2 * step/100.0
- interval_end = 6 - 2 * step/100.0
- data = np.random.uniform(interval_start, interval_end, size=(10000))
+* 可搜索打点数据标签,展示特定数据
+
+
+
+
- # 使用 add_record() 函数添加数据
- param1_histogram.add_record(step, data)
-```
-运行上述程序后,在命令行中执行
-```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
-```
-接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),即可查看 histogram 组件的直方图。其中横坐标为参数的数值,曲线上的值为相应参数的个数。右边纵轴的值为 Step,不同 Step 的数据组用不同颜色加以区分。
+* X轴有三种衡量尺度
+
+1. Step:迭代次数
+2. Walltime:训练绝对时间
+3. Relative:训练时长
-
-图2. histogram 组件展示直方图
+
-### image -- 图片可视化组件
-image 组件用于显示图片。在程序运行过程中,将图片数据传入 image 组件,就可在 VisualDL 的前端网页看到相应图片。
-使用 image 组件添加数据,需要先设定 LogWriter 对象的成员函数 `image()`,即可结合 `start_sampling()`, `is_sample_taken()`, `set_sample()` 和 `finish_sample()` 这四个 image 组件的成员函数来完成。这几个函数的定义及用法如下:
+* 可调整曲线平滑度,以便更好的展现参数整体的变化趋势
-* LogWriter 对象的成员函数 `image()`:
+
+
+
-```python
-def image(tag, num_samples, step_cycle)
-```
-> :param tag : 标签,结合 set_sample() 的参数 index,决定图片显示的子框。
-> :param num_samples : 设置单个 step 的采样数,页面上的图片数目也等于 num_samples。
-> :param step_cycle : 将 step_cycle 个 step 的数据存储到日志中,默认值为 1。
-> :return: 函数返回一个 ImageWriter 对象。
-* 开始新的采样周期 - 开辟一块内存空间,用于存放采样的数据:
-```python
-def start_sampling()
-```
+## Image -- 图片可视化组件
-* 判断该图片是否应被采样,当返回值为 `-1`,表示不用采样,否则,应被采样:
+### 介绍
-```python
-def is_sample_taken()
-```
+Image 组件用于显示图片数据随训练的变化。在模型训练过程中,将图片数据传入 Image 组件,就可在 VisualDL 的前端网页查看相应图片。
-* 使用函数 `set_sample()` 添加图片数据:
+### 记录接口
+
+Image 组件的记录接口如下:
```python
-def set_sample(index, image_shape, image_data)
+add_image(tag, img, step, walltime=None)
```
-> :param index : 索引号,与 tag 组合使用,决定图片显示的子框。
-> :param image_shape : 图片的形状,[weight, height, 通道数(RGB 为 3,灰度图为 1)]。
-> :param image_data : 图片的数据格式为矩阵,通常为 numpy.ndarray,经 flatten() 后变为行向量。
+接口参数说明如下:
-* 结束当前的采样周期,将已采样的数据存到磁盘,并释放这一块内存空间:
+| 参数 | 格式 | 含义 |
+| -------- | ------------- | ------------------------------------------- |
+| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` |
+| img | numpy.ndarray | 以ndarray格式表示的图片 |
+| step | int | 记录的步数 |
+| walltime | int | 记录数据的时间戳,默认为当前时间戳 |
-```python
-def finish_sample()
-```
+### Demo
+
+下面展示了使用 Image 组件记录数据的示例,代码文件请见[Image组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py)
-例5 image 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/image-demo.py)
```python
-# coding=utf-8
import numpy as np
-from visualdl import LogWriter
from PIL import Image
+from visualdl import LogWriter
def random_crop(img):
- '''
- 此函数用于获取图片数据 img 的 100*100 的随机分块
- '''
+ """获取图片的随机 100x100 分片
+ """
img = Image.open(img)
w, h = img.size
random_w = np.random.randint(0, w - 100)
random_h = np.random.randint(0, h - 100)
- return img.crop((random_w, random_h, random_w + 100, random_h + 100))
-
-
-# 创建 LogWriter 对象
-log_writer = LogWriter("./log", sync_cycle=10)
-
-# 创建 image 组件,模式为train, 采样数设为 ns
-ns = 2
-with log_writer.mode("train") as logger:
- input_image = logger.image(tag="test", num_samples=ns)
-
-# 一般要设置一个变量 sample_num,用于记录当前已采样了几个 image 数据
-sample_num = 0
-
-for step in range(6):
- # 设置start_sampling() 的条件,满足条件时,开始采样
- if sample_num == 0:
- input_image.start_sampling()
-
- # 获取idx
- idx = input_image.is_sample_taken()
- # 如果 idx != -1,采样,否则跳过
- if idx != -1:
- # 获取图片数据
- image_path = "test.jpg"
- image_data = np.array(random_crop(image_path))
- # 使用 set_sample() 函数添加数据
- # flatten() 用于把 ndarray 由矩阵变为行向量
- input_image.set_sample(idx, image_data.shape, image_data.flatten())
- sample_num += 1
-
- # 如果完成了当前轮的采样,则调用finish_sample()
- if sample_num % ns == 0:
- input_image.finish_sampling()
- sample_num = 0
+ r = img.crop((random_w, random_h, random_w + 100, random_h + 100))
+ return np.asarray(r)
+
+
+if __name__ == '__main__':
+ # 初始化一个记录器
+ with LogWriter(logdir="./log/image_test/train") as writer:
+ for step in range(6):
+ # 添加一个图片数据
+ writer.add_image(tag="eye",
+ img=random_crop("../../docs/images/eye.jpg"),
+ step=step)
```
-运行上述程序后,在命令行中执行
+运行上述程序后,在命令行执行
+
```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
+visualdl --logdir ./log --port 8080
```
-接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的`SAMPLES`选项,即可查看 image 组件的展示图片。每一张子图都有一条浅绿色的横轴,拖动即可展示不同 step 的图片。
+在浏览器输入`http://127.0.0.1:8080`,即可查看图片数据。
-
-图3. image 组件展示图片
+
-### text -- 文本组件
-text 组件用于显示文本,在程序运行过程中,将文本数据传入 text 组件,即可在 VisualDL 的前端网页中查看。
-
-想要通过 text 组件添加数据,只需先设定 LogWriter 对象的成员函数 `text()`,即可使用 `add_record()` 函数来完成。这两个函数的具体用法如下:
-* LogWriter 对象的成员函数 `text()`:
-```python
-def text(tag)
-```
-> :param tag : 标签,结合 LogWriter 设定的模式,决定文本显示的子框。
-> :return: 函数返回一个 TextWriter 对象。
-
-* text 组件的成员函数 `add_record()`:
-
-```python
-def add_record(step, str)
-```
-
-> :param step : 步进数,标记这是第几组添加的数据。
-> :param str : 输入文本,数据类型为 string。
-
-例6 text 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/text-demo.py)
-
-```python
-# coding=utf-8
-from visualdl import LogWriter
+### 功能操作说明
-# 创建 LogWriter 对象
-log_writter = LogWriter("./log", sync_cycle=10)
+可搜索图片标签显示对应图片数据
-# 创建 text 组件,模式为 train, 标签为 test
-with log_writter.mode("train") as logger:
- vdl_text_comp = logger.text(tag="test")
+
+
+
-# 使用 add_record() 函数添加数据
-for i in range(1, 6):
- vdl_text_comp.add_record(i, "这是第 %d 个 step 的数据。" % i)
- vdl_text_comp.add_record(i, "This is data %d ." % i)
-```
-运行上述程序后,在命令行中执行
-```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
-```
-接着在浏览器打开 [http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `SAMPLES` 选项,即可查看 text 组件的展示文本。每一张小框都有一条浅绿色的横轴,拖动即可显示不同 step 的文本。
+支持滑动Step/迭代次数查看不同迭代次数下的图片数据
-
-图4. text 组件展示文本
+
-### audio -- 音频播放组件
- audio 为音频播放组件,在程序运行过程中,将音频数据传入 audio 组件,就可以在 VisualDL 的前端网页中直接播放或下载。
-使用 audio 组件添加数据,需要先设定 LogWriter 对象的成员函数 `audio()`,即可结合 `start_sampling()`, `is_sample_taken()`, `set_sample()` 和 `finish_sample()` 这四个 audio 组件的成员函数来完成。这几个函数的定义和用法如下:
-* LogWriter 对象的成员函数 `audio()`:
-```python
-def audio(tag, num_samples, step_cycle)
-```
+## Audio--音频播放组件
-> :param tag : 标签,结合 set_sample() 的参数 index,决定音频播放的子框。
-> :param num_samples : 设置单个 step 的采样数,页面上的音频数目也等于 num_samples。
-> :param step_cycle : 将 step_cycle 个 step 的数据存储到日志中,默认值为 1。
-> :return: 函数返回一个 AudioWriter 对象。
+### 介绍
-* 开始新的采样周期 - 开辟一块内存空间,用于存放采样的数据:
+Audio组件用于实时查看训练过程中的音频数据,帮助监控语音识别与合成等任务的训练过程。
-```python
-def start_sampling()
-```
+### 记录接口
-* 判断该音频是否应被采样,当返回值为 `-1`,表示不用采样,否则,应被采样:
+Audio 组件的记录接口如下:
```python
-def is_sample_taken()
+add_audio(tag, audio_array, step, sample_rate)
```
-* 使用函数 `set_sample()` 添加音频数据:
+接口参数说明如下:
-```python
-def set_sample(index, audio_params, audio_data)
-```
+| 参数 | 格式 | 含义 |
+| ----------- | ------------- | ------------------------------------------ |
+| tag | string | 记录指标的标志,如`audio_tag`,不能含有`%` |
+| audio_array | numpy.ndarray | 以ndarray格式表示的音频 |
+| step | int | 记录的步数 |
+| sample_rate | int | 采样率,**注意正确填写对应音频的原采样率** |
-> :param index : 索引号,结合 tag,决定音频播放的子框。
-> :param audio_params : 音频的参数 [sample rate, sample width, channel],其中 sample rate 为采样率, sample width 为每一帧采样的字节数, channel 为通道数(单声道设为1,双声道设为2,四声道设为4,以此类推)。
-> :param audio_data :音频数据,音频数据的格式一般为 numpy.ndarray,经 flatten() 后变为行向量。
+### Demo
-* 结束当前的采样周期,将已采样的数据存到磁盘,并释放这一块内存空间:
+下面展示了使用 Audio 组件记录数据的示例,代码文件请见[Audio组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py)
```python
-def finish_sample()
-```
-
-例7 audio 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/audio-demo.py)
-```python
-# coding=utf-8
+from visualdl import LogWriter
import numpy as np
import wave
-from visualdl import LogWriter
def read_audio_data(audio_path):
"""
- 读取音频数据
+ Get audio data.
"""
CHUNK = 4096
f = wave.open(audio_path, "rb")
wavdata = []
chunk = f.readframes(CHUNK)
-
while chunk:
- data = np.fromstring(chunk, dtype='uint8')
+ data = np.frombuffer(chunk, dtype='uint8')
wavdata.extend(data)
chunk = f.readframes(CHUNK)
-
# 8k sample rate, 16bit frame, 1 channel
shape = [8000, 2, 1]
-
return shape, wavdata
-# 创建一个 LogWriter 对象
-log_writter = LogWriter("./log", sync_cycle=10)
-
-# 创建 audio 组件,模式为 train
-ns = 2
-with log_writter.mode("train") as logger:
- input_audio = logger.audio(tag="test", num_samples=ns)
-
-# 一般要设定一个变量 audio_sample_num,用来记录当前已采样了几段 audio 数据
-audio_sample_num = 0
-
-for step in range(9):
- # 设置 start_sampling() 的条件,满足条件时,开始采样
- if audio_sample_num == 0:
- input_audio.start_sampling()
-
- # 获取 idx
- idx = input_audio.is_sample_taken()
- # 如果 idx != -1,采样,否则跳过
- if idx != -1:
- # 读取数据,音频文件的格式可以为 .wav .mp3 等
- audio_path = "test.wav"
- audio_shape, audio_data = read_audio_data(audio_path)
- # 使用 set_sample()函数添加数据
- input_audio.set_sample(idx, audio_shape, audio_data)
- audio_sample_num += 1
-
- # 如果完成了当前轮的采样,则调用 finish_sample()
- if audio_sample_num % ns ==0:
- input_audio.finish_sampling()
- audio_sample_num = 0
+if __name__ == '__main__':
+ with LogWriter(logdir="./log") as writer:
+ audio_shape, audio_data = read_audio_data("./testing.wav")
+ audio_data = np.array(audio_data)
+ writer.add_audio(tag="audio_tag",
+ audio_array=audio_data,
+ step=0,
+ sample_rate=8000)
```
-运行上述程序后,在命令行中执行
+运行上述程序后,在命令行执行
```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
+visualdl --logdir ./log --port 8080
```
-接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `SAMPLES` 选项,即有音频的小框,可以播放和下载。每一张小框中都有一条浅绿色的横轴,拖动即可选择不同 step 的音频段。
+在浏览器输入`http://127.0.0.1:8080`,即可查看音频数据。
+
+
+
+
+
+
+
+### 功能操作说明
+
+- 可搜索音频标签显示对应音频数据
+
+
+
+
+
+
+
+- 支持滑动Step/迭代次数试听不同迭代次数下的音频数据
+
+
+
+
+
+
+
+- 支持播放/暂停音频数据
+
+
+
+
+
+
+
+- 支持音量调节
-
-图5. audio 组件播放音频
+
-### high dimensional -- 数据降维组件
-high dimensional 组件的作用就是将数据映射到 2D/3D 空间来做可视化嵌入,这有利于了解不同数据的相关性。high dimensional 组件支持以下两种降维算法:
-* PCA : Principle Component Analysis 主成分分析
-* [t-SNE](https://lvdmaaten.github.io/tsne/) : t-distributed stochastic neighbor embedding t-分布式随机领域嵌入
+- 支持音频下载
+
+
+
+
+
+
+
+
+## Graph--网络结构组件
+
+### 介绍
+
+Graph组件一键可视化模型的网络结构。用于查看模型属性、节点信息、节点输入输出等,并进行节点搜索,协助开发者们快速分析模型结构与了解数据流向。
+
+### Demo
+
+共有两种启动方式:
+
+- 前端模型文件拖拽上传:
+
+ - 如只需使用Graph组件,则无需添加任何参数,在命令行执行`visualdl`后即可启动面板进行上传。
+ - 如果同时需使用其他功能,在命令行指定日志文件路径(以`./log`为例)即可启动面板进行上传:
+
+ ```shell
+ visualdl --logdir ./log --port 8080
+ ```
+
+
+
+
+
+
+
+- 后端启动Graph:
+
+  - 在命令行加入参数`--model`并指定**模型文件**路径(非文件夹路径),即可启动并查看网络结构可视化(模型文件的产出方式可参考本小节末尾的示例):
+
+ ```shell
+ visualdl --model ./log/model --port 8080
+ ```
+
+
+
+
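+作为参考,下面给出一个最小示例(基于Paddle 1.8静态图接口的示意写法,网络结构仅作演示,文件路径均为假设)。`fluid.io.save_inference_model`会在目标目录下生成名为`__model__`的模型文件,可尝试将`--model`指向该文件进行可视化:
+
+```python
+import paddle.fluid as fluid
+
+# 定义一个极简网络:单层全连接,仅用于演示产出模型文件
+image = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")
+prediction = fluid.layers.fc(input=image, size=10, act="softmax")
+
+exe = fluid.Executor(fluid.CPUPlace())
+exe.run(fluid.default_startup_program())
+
+# 保存推理模型,会生成 ./paddle_model/__model__ 文件
+fluid.io.save_inference_model(
+    "./paddle_model",
+    feeded_var_names=[image.name],
+    target_vars=[prediction],
+    executor=exe)
+```
+
+随后可尝试执行 `visualdl --model ./paddle_model/__model__ --port 8080` 查看该网络的结构(具体支持的模型格式可参考下文「功能操作说明」中的列表)。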
-想使用 high dimensional 组件,只需先设定 LogWriter 对象的成员函数 `embedding()`,即可使用 `add_embeddings_with_word_dict()` 函数添加数据。这两个函数的定义及用法如下:
-* LogWriter 对象的成员函数 `embedding()` 不需输入参数,函数返回一个 embeddingWriter 对象:
+
+### 功能操作说明
+
+- 一键上传模型
+ - 支持模型格式:PaddlePaddle、ONNX、Keras、Core ML、Caffe、Caffe2、Darknet、MXNet、ncnn、TensorFlow Lite
+ - 实验性支持模型格式:TorchScript、PyTorch、Torch、 ArmNN、BigDL、Chainer、CNTK、Deeplearning4j、MediaPipe、ML.NET、MNN、OpenVINO、Scikit-learn、Tengine、TensorFlow.js、TensorFlow
+
+
+
+
+
+
+
+- 支持上下左右任意拖拽模型、放大和缩小模型
+
+
+
+
+
+
+
+- 搜索定位到对应节点
+
+
+
+
+
+
+
+- 点击查看模型属性
+
+
+
+
+
+
+
+
+
+
+
+
+
+- 支持选择模型展示的信息
+
+
+
+
+
+
+
+- 支持以PNG、SVG格式导出模型结构图
+
+
+
+
+
+
+
+- 点击节点即可展示对应属性信息
+
+
+
+
+
+
+
+- 支持一键更换模型
+
+
+
+
+
+
+
+## Histogram--直方图组件
+
+### 介绍
+
+Histogram组件以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势,帮助开发者深入了解模型各层效果,精准调整模型结构。
+
+### 记录接口
+
+Histogram 组件的记录接口如下:
```python
-def embedding()
+add_histogram(tag, values, step, walltime=None, buckets=10)
```
-* high dimensional 的成员函数 `add_embeddings_with_word_dict()`:
+接口参数说明如下:
+
+| 参数 | 格式 | 含义 |
+| -------- | --------------------- | ------------------------------------------- |
+| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` |
+| values | numpy.ndarray or list | 以ndarray或list格式表示的数据 |
+| step | int | 记录的步数 |
+| walltime | int | 记录数据的时间戳,默认为当前时间戳 |
+| buckets | int | 生成直方图的分段数,默认为10 |
+
+### Demo
+
+下面展示了使用 Histogram组件记录数据的示例,代码文件请见[Histogram组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py)
```python
-def add_embeddings_with_word_dict(data, Dict)
+from visualdl import LogWriter
+import numpy as np
+
+
+if __name__ == '__main__':
+ values = np.arange(0, 1000)
+ with LogWriter(logdir="./log/histogram_test/train") as writer:
+ for index in range(1, 101):
+ interval_start = 1 + 2 * index / 100.0
+ interval_end = 6 - 2 * index / 100.0
+ data = np.random.uniform(interval_start, interval_end, size=(10000))
+ writer.add_histogram(tag='default tag',
+ values=data,
+ step=index,
+ buckets=10)
+```
+
+运行上述程序后,在命令行执行
+
+```shell
+visualdl --logdir ./log --port 8080
```
-> :param data : 输入数据,数据类型为 List[List(float)]。
-> :param Dict : 字典, 数据类型为 Dict[str, int]。
+在浏览器输入`http://127.0.0.1:8080`,即可查看训练参数直方图。
-例8 high dimensional 组件示例程序 [Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/embedding-demo.py)
+### 功能操作说明
+
+- 支持数据卡片「最大化」、直方图「下载」
+
+
+
+
+
+- 可选择Offset或Overlay模式
+
+
+
+
+
+
+ - Offset模式
+
+
+
+
+
+
+
+ - Overlay模式
+
+
+
+
+
+
+- 数据点Hover展示参数值、训练步数、频次
+
+ - 在第240次训练步数时,权重为-0.0031,且出现的频次是2734次
+
+
+
+
+
+- 可搜索卡片标签,展示目标直方图
+
+
+
+
+
+- 可搜索打点数据标签,展示特定数据流
+
+
+
+
+
+## PR Curve--PR曲线组件
+
+### 介绍
+
+PR Curve组件以折线图形式呈现精度(Precision)与召回率(Recall)的权衡分析,帮助开发者清晰直观地了解模型训练效果,便于分析模型是否达到理想标准。
+
+### 记录接口
+
+PR Curve组件的记录接口如下:
+
+```python
+add_pr_curve(tag, labels, predictions, step=None, num_thresholds=10)
+```
+
+接口参数说明如下:
+
+| 参数 | 格式 | 含义 |
+| -------------- | --------------------- | ------------------------------------------- |
+| tag | string | 记录指标的标志,如`train/loss`,不能含有`%` |
+| labels | numpy.ndarray or list | 以ndarray或list格式表示的实际类别 |
+| predictions    | numpy.ndarray or list | 以ndarray或list格式表示的预测概率,取值范围应为0到1  |
+| step | int | 记录的步数 |
+| num_thresholds | int | 阈值设置的个数,默认为10,最大值为127 |
+
+### Demo
+
+下面展示了使用 PR Curve 组件记录数据的示例,代码文件请见[PR Curve组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py)
```python
-# coding=utf-8
-import numpy as np
from visualdl import LogWriter
+import numpy as np
-# 创建一个 LogWriter 对象
-log_writer = LogWriter("./log", sync_cycle=10)
-
-# 创建一个 high dimensional 组件,模式设为 train
-with log_writer.mode("train") as logger:
- train_embedding = logger.embedding()
-
-# 第一个参数为数据,数据类型为 List[List(float)]
-hot_vectors = np.random.uniform(1, 2, size=(10, 3))
-# 第二个参数为字典,数据类型为 Dict[str, int]
-# 其中第一个分量为坐标点的名称, 第二个分量为该坐标对应原数据的第几行分量
-word_dict = {
- "label_1": 5,
- "label_2": 4,
- "label_3": 3,
- "label_4": 2,
- "label_5": 1,}
-
-# 使用 add_embeddings_with_word_dict(data, Dict)
-train_embedding.add_embeddings_with_word_dict(hot_vectors, word_dict)
+with LogWriter("./log/pr_curve_test/train") as writer:
+ for step in range(3):
+ labels = np.random.randint(2, size=100)
+ predictions = np.random.rand(100)
+ writer.add_pr_curve(tag='pr_curve',
+ labels=labels,
+ predictions=predictions,
+ step=step,
+ num_thresholds=5)
```
-运行上述程序后,在命令行中执行
+运行上述程序后,在命令行执行
```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080
+visualdl --logdir ./log --port 8080
```
-接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的 `HIGHDIMENSIONAL` 选项,即可查看数据映射后的相对位置。
+接着在浏览器打开`http://127.0.0.1:8080`,即可查看PR Curve。
-
-图6. high dimensional 组件展示平面坐标
+
-
-
-图7. high dimensional 组件展示直角坐标
-
-## graph -- 神经网络可视化组件
-graph 组件用于神经网络模型结构的可视化,该组件可以展示 Paddle 格式和 [ONNX](https://onnx.ai) 格式保存的模型。graph 组件可帮助用户理解神经网络的模型结构,也有助于排查神经网络的配置错误。
+### 功能操作说明
-与其他需要记录数据的组件不同,使用 graph 组件的唯一要素就是指定模型文件的存放位置,即在 `visualdl` 命令中增加选项 `--model_pb` 来指定模型文件的存放路径,则可在前端看到相应效果。
+- 支持数据卡片「最大化」,「还原」、「下载」PR曲线
-例9 graph 组件示例程序(下面示例展示了如何用 Paddle 保存一个 Lenet-5 模型)[Github](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/component/graph-demo.py)
+
+
+
+
+- 数据点Hover展示详细信息:阈值对应的TP、TN、FP、FN
+
+
+
+
+
+- 可搜索卡片标签,展示目标图表
+
+
+
+
+
+- 可搜索打点数据标签,展示特定数据
+
+
+
+
+
+
+- 支持查看不同训练步数下的PR曲线
+
+
+
+
+
+- X轴-时间显示类型有三种衡量尺度
+
+ - Step:迭代次数
+ - Walltime:训练绝对时间
+ - Relative:训练时长
+
+
+
+
+
+## High Dimensional -- 数据降维组件
+
+### 介绍
+
+High Dimensional 组件将高维数据进行降维展示,用于深入分析高维数据间的关系。目前支持以下两种降维算法:
+
+  - PCA : Principal Component Analysis 主成分分析
+  - t-SNE : t-distributed stochastic neighbor embedding t-分布式随机邻域嵌入
+
+### 记录接口
+
+High Dimensional 组件的记录接口如下:
```python
-# coding=utf-8
-import paddle.fluid as fluid
-
-
-def lenet_5(img):
- '''
- 定义神经网络结构
- '''
- conv1 = fluid.nets.simple_img_conv_pool(
- input=img,
- filter_size=5,
- num_filters=20,
- pool_size=2,
- pool_stride=2,
- act="relu")
-
- conv1_bn = fluid.layers.batch_norm(input=conv1)
-
- conv2 = fluid.nets.simple_img_conv_pool(
- input=conv1_bn,
- filter_size=5,
- num_filters=50,
- pool_size=2,
- pool_stride=2,
- act="relu")
-
- predition = fluid.layers.fc(input=conv2, size=10, act="softmax")
- return predition
-
-
-# 变量赋值
-image = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")
-predition = lenet_5(image)
-
-place = fluid.CPUPlace()
-exe = fluid.Executor(place=place)
-exe.run(fluid.default_startup_program())
-
-# 使用函数 save_inference_model() 保存 paddle 模型
-fluid.io.save_inference_model(
- "./paddle_lenet_5_model",
- feeded_var_names=[image.name],
- target_vars=[predition],
- executor=exe)
+add_embeddings(tag, labels, hot_vectors, walltime=None)
```
-运行上述程序后,在命令行中执行
+接口参数说明如下:
+
+| 参数 | 格式 | 含义 |
+| ----------- | ------------------- | ---------------------------------------------------- |
+| tag | string | 记录指标的标志,如`default`,不能含有`%` |
+| labels      | numpy.array or list | 一维数组表示的标签,每个元素为string类型               |
+| hot_vectors | numpy.array or list | 与labels一一对应,每个元素可以看作是某个标签的特征 |
+| walltime | int | 记录数据的时间戳,默认为当前时间戳 |
+
+### Demo
+
+下面展示了使用 High Dimensional 组件记录数据的示例,代码文件请见[High Dimensional组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py)
+
+```python
+from visualdl import LogWriter
+
+
+if __name__ == '__main__':
+ hot_vectors = [
+ [1.3561076367500755, 1.3116267195134017, 1.6785401875616097],
+ [1.1039614644440658, 1.8891609992484688, 1.32030488587171],
+ [1.9924524852447711, 1.9358920727142739, 1.2124401279391606],
+ [1.4129542689796446, 1.7372166387197474, 1.7317806077076527],
+ [1.3913371800587777, 1.4684674577930312, 1.5214136352476377]]
+
+ labels = ["label_1", "label_2", "label_3", "label_4", "label_5"]
+ # 初始化一个记录器
+ with LogWriter(logdir="./log/high_dimensional_test/train") as writer:
+ # 将一组labels和对应的hot_vectors传入记录器进行记录
+ writer.add_embeddings(tag='default',
+ labels=labels,
+ hot_vectors=hot_vectors)
+```
+
+运行上述程序后,在命令行执行
```shell
-visualdl --logdir ./log --host 0.0.0.0 --port 8080 --model_pb paddle_lenet_5_model
+visualdl --logdir ./log --port 8080
```
-接着在浏览器打开[http://0.0.0.0:8080](http://0.0.0.0:8080),点击页面最上方的`GRAPHS`选项,即可查看 Lenet-5 的模型结构。
+接着在浏览器打开`http://127.0.0.1:8080`,即可查看降维后的可视化数据。
-
-图8. graph 组件展示 Lenet-5 的模型结构
+
+
+
+
+
+
diff --git a/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst b/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst
index 9995734289f4a65c59d72efe21b5e3dcc496820d..0f46dced718ddf34cdab23eae029254f904ae3e7 100644
--- a/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst
+++ b/doc/fluid/advanced_guide/evaluation_debugging/evaluation/metrics.rst
@@ -17,6 +17,7 @@ paddle.fluid.metrics模块提供了一系列常用的模型评价指标; 用户
不同类型的任务,会选用不同的评价指标。
回归问题通常会用RMSE(均方根误差)、MAE(平均绝对误差)、R-Square(R平方)等
+
AUC(Area Under Curve)指标则常被用在分类任务(classification)上
目标检测任务(Object Detection)则经常会用到mAP(Mean Average Precision)
diff --git a/doc/fluid/advanced_guide/flags/flags_cn.rst b/doc/fluid/advanced_guide/flags/flags_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d0d414725666c1d90f5d58c26dc4536f08f439f
--- /dev/null
+++ b/doc/fluid/advanced_guide/flags/flags_cn.rst
@@ -0,0 +1,28 @@
+
+环境变量FLAGS
+==================
+
+调用说明
+----------
+
+PaddlePaddle中的环境变量FLAGS支持两种设置方式。
+
+- 通过export来设置环境变量,如 :code:`export FLAGS_eager_delete_tensor_gb=1.0` 。
+
+- 通过API :code:`get_flags` 和 :code:`set_flags` 来打印和设置环境变量FLAGS。API使用详情请参考 :ref:`cn_api_fluid_get_flags` 与 :ref:`cn_api_fluid_set_flags` 。
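+
+下面给出一个最小示例(假设所安装的PaddlePaddle版本已提供 :code:`get_flags` / :code:`set_flags` 接口):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # 通过set_flags设置环境变量FLAGS
+    fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
+    # 通过get_flags读取并打印当前取值
+    print(fluid.get_flags(['FLAGS_eager_delete_tensor_gb']))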
+
+
+环境变量FLAGS功能分类
+----------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ cudnn_cn.rst
+ data_cn.rst
+ debug_cn.rst
+ device_cn.rst
+ distributed_cn.rst
+ executor_cn.rst
+ memory_cn.rst
+ others_cn.rst
diff --git a/doc/fluid/advanced_guide/flags/flags_en.rst b/doc/fluid/advanced_guide/flags/flags_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b24c551c78d7bc74a76901c717b792f78b4237e3
--- /dev/null
+++ b/doc/fluid/advanced_guide/flags/flags_en.rst
@@ -0,0 +1,74 @@
+==================
+FLAGS
+==================
+
+Usage
+------
+These FLAGS in PaddlePaddle can be set in two ways.
+
+- Set the FLAGS through export. For example: :code:`export FLAGS_eager_delete_tensor_gb=1.0` .
+
+- Use the APIs :code:`get_flags` and :code:`set_flags` to print and set the FLAGS. For more information on using these APIs, please refer to :ref:`api_fluid_get_flags` and :ref:`api_fluid_set_flags` .
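+
+A minimal example (assuming the installed PaddlePaddle version provides :code:`get_flags` / :code:`set_flags`):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    # set a FLAGS value through set_flags
+    fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
+    # read back and print the current value through get_flags
+    print(fluid.get_flags(['FLAGS_eager_delete_tensor_gb']))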
+
+
+FLAGS Quick Search
+------------------
+
+.. toctree::
+ :maxdepth: 1
+
+
+ cudnn_en.rst
+ data_en.rst
+ debug_en.rst
+ device_en.rst
+ distributed_en.rst
+ executor_en.rst
+ memory_en.rst
+ others_en.rst
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/fluid/advanced_guide/flags/memory_cn.rst b/doc/fluid/advanced_guide/flags/memory_cn.rst
index cbafa94a0e5b28570cbb16a92f17a947bd3458fd..94676721c2d0baca9a2d744e7dbc7064c7eed279 100644
--- a/doc/fluid/advanced_guide/flags/memory_cn.rst
+++ b/doc/fluid/advanced_guide/flags/memory_cn.rst
@@ -11,13 +11,14 @@ FLAGS_allocator_strategy
取值范围
---------------
-String型,['naive_best_fit', 'auto_growth']中的一个。缺省值为'naive_best_fit'。
+String型,['naive_best_fit', 'auto_growth']中的一个。若编译Paddle时CMake使用-DON_INFER=ON,则缺省值为'naive_best_fit';
+其他情况下缺省值为'auto_growth'。PaddlePaddle pip安装包的默认策略也是'auto_growth'。
示例
--------
-FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器。
+FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器。PaddlePaddle会预先占用大部分可用内存/显存,在具体数据使用时再从中分配,这种方式预占空间较大,但内存/显存碎片较少(比如能够支持模型的最大batch size会变大)。
-FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。
+FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。PaddlePaddle会随着真实数据需要再占用内存/显存,但内存/显存可能会产生碎片(比如能够支持模型的最大batch size会变小)。
FLAGS_eager_delete_scope
diff --git a/doc/fluid/advanced_guide/flags/memory_en.rst b/doc/fluid/advanced_guide/flags/memory_en.rst
index 8702a4082006ab05b0a983f3b117fba7617b558f..0e630e7d93d51e668397b9c88fbfd75ad45f9395 100644
--- a/doc/fluid/advanced_guide/flags/memory_en.rst
+++ b/doc/fluid/advanced_guide/flags/memory_en.rst
@@ -11,13 +11,13 @@ Use to choose allocator strategy of PaddlePaddle.
Values accepted
---------------
-String, enum in ['naive_best_fit', 'auto_growth']. The default value is 'naive_best_fit'.
+String, enum in ['naive_best_fit', 'auto_growth']. The default value is 'naive_best_fit' if users compile PaddlePaddle with the -DON_INFER=ON CMake flag, otherwise it is 'auto_growth'. The default PaddlePaddle pip package uses 'auto_growth'.
Example
--------
-FLAGS_allocator_strategy=naive_best_fit would use the pre-allocated best fit allocator.
+FLAGS_allocator_strategy=naive_best_fit would use the pre-allocated best fit allocator. 'naive_best_fit' strategy would occupy almost all GPU memory by default but leads to less memory fragmentation (i.e., maximum batch size of models may be larger).
-FLAGS_allocator_strategy=auto_growth would use the auto growth allocator.
+FLAGS_allocator_strategy=auto_growth would use the auto growth allocator. 'auto_growth' strategy would allocate GPU memory on demand but may lead to more memory fragmentation (i.e., maximum batch size of models may be smaller).
diff --git a/doc/fluid/advanced_guide/flags_cn.rst b/doc/fluid/advanced_guide/flags_cn.rst
deleted file mode 100644
index 46abff4b275abbc723cd49db49a9d277bd804f80..0000000000000000000000000000000000000000
--- a/doc/fluid/advanced_guide/flags_cn.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-
-环境变量FLAGS
-==================
-
-
-.. toctree::
- :maxdepth: 1
-
-
- flags/cudnn_cn.rst
- flags/data_cn.rst
- flags/debug_cn.rst
- flags/device_cn.rst
- flags/distributed_cn.rst
- flags/executor_cn.rst
- flags/memory_cn.rst
- flags/others_cn.rst
diff --git a/doc/fluid/advanced_guide/flags_en.rst b/doc/fluid/advanced_guide/flags_en.rst
deleted file mode 100644
index 9c8c3d621ebca52a15a8b61f53c2d090a124f875..0000000000000000000000000000000000000000
--- a/doc/fluid/advanced_guide/flags_en.rst
+++ /dev/null
@@ -1,63 +0,0 @@
-==================
-FLAGS
-==================
-
-
-.. toctree::
- :maxdepth: 1
-
-
- flags/cudnn_en.rst
- flags/data_en.rst
- flags/debug_en.rst
- flags/device_en.rst
- flags/distributed_en.rst
- flags/executor_en.rst
- flags/memory_en.rst
- flags/others_en.rst
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/doc/fluid/advanced_guide/index_cn.rst b/doc/fluid/advanced_guide/index_cn.rst
index d0364810b95b467a0c333790e8412971dfe32105..4fa3243cea9d68126fb59fe769c8d3a0bdc2f008 100644
--- a/doc/fluid/advanced_guide/index_cn.rst
+++ b/doc/fluid/advanced_guide/index_cn.rst
@@ -2,31 +2,14 @@
进阶指南
########
-如果您已比较熟练使用飞桨来完成常规任务,期望了解更多飞桨在工业部署方面的能力,或者尝试自己的二次开发,请阅读:
+如果您已经学会使用飞桨来完成常规任务,期望了解更多飞桨在工业部署方面的能力,请阅读:
- - `数据准备 <../advanced_guide/data_preparing/index_cn.html>`_:介绍高效的同步异步数据读取方法
-
- - `分布式训练 <../advanced_guide/distributed_training/index_cn.html>`_ :介绍如何使用分布式训练
- `预测与部署 <../advanced_guide/inference_deployment/index_cn.html>`_ :介绍如何应用训练好的模型进行预测
- - `性能调优 <../advanced_guide/performance_improving/index_cn.html>`_ :介绍飞桨使用过程中的调优方法
-
- - `模型评估/调试 <../advanced_guide/evaluation_debugging/index_cn.html>`_ :介绍模型评估与调试的典型方法
-
- - `二次开发 <../advanced_guide/addon_development/index_cn.html>`_ :介绍如何新增Operator和如何向飞桨开源社区贡献代码
-
- - `环境变量FLAGS <../advanced_guide/flags/index_cn.html>`_
-
-
.. toctree::
:hidden:
- data_preparing/index_cn.rst
- distributed_training/index_cn.rst
+ dygraph_to_static/index_cn.rst
inference_deployment/index_cn.rst
- performance_improving/index_cn.rst
- evaluation_debugging/index_cn.rst
- addon_development/index_cn.rst
- flags_cn.rst
-
+ flags/flags_cn.rst
diff --git a/doc/fluid/advanced_guide/index_en.rst b/doc/fluid/advanced_guide/index_en.rst
index a3201508de5e4218e78c81245708f9d7fd21b6a8..f65d0ce22e3520de008a6f0706c8fb4b4483cfee 100644
--- a/doc/fluid/advanced_guide/index_en.rst
+++ b/doc/fluid/advanced_guide/index_en.rst
@@ -8,30 +8,14 @@ Advanced User Guides
So far you have already been familiar with PaddlePaddle. And the next expectation, read more on:
- - `Prepare Data `_:How to prepare the data efficiently.
-
- - `Distributed Training `_ :How to apply the distributed training in your projects.
- `Deploy Inference Model `_ :How to deploy the trained network to perform practical inference
- - `Practice Improving `_ :How to do profiling for Fluid programs
-
- - `Model Evaluation and Debugging `_ :How to evaluate your program.
-
- - `Addon Development `_ :How to contribute codes and documentation to our communities
-
- - `FLAGS `_
-
.. toctree::
:hidden:
- data_preparing/index_en.rst
- distributed_training/index_en.rst
+ dygraph_to_static/index_en.rst
inference_deployment/index_en.rst
- performance_improving/index_en.rst
- evaluation_debugging/index_en.rst
- addon_development/index_en.rst
- flags_en.rst
-
+ flags/flags_en.rst
diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst
index 06630753d5c27eeff84806be902b586b1c563368..788341863e1fe669ab10bc634d948fa7c6ef481c 100644
--- a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst
+++ b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.rst
@@ -1,23 +1,22 @@
.. _install_or_build_cpp_inference_lib:
-安装与编译C++预测库
+安装与编译 Linux 预测库
===========================
直接下载安装
-------------
.. csv-table::
- :header: "版本说明", "预测库(1.7.0版本)", "预测库(develop版本)"
+ :header: "版本说明", "预测库(1.8.4版本)", "预测库(develop版本)"
:widths: 3, 2, 2
- "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda9.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_",
- "ubuntu14.04_cuda10.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_",
- "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_",
+ "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_",
+ "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_",
从源码编译
@@ -40,23 +39,28 @@ WITH_NV_JETSON OFF 在NV Jetson硬件上编译时需
建议按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。
-首先从github拉取最新代码并安装nccl
+首先从github拉取最新代码
.. code-block:: bash
- git clone https://github.com/paddlepaddle/paddle
+ git clone https://github.com/paddlepaddle/Paddle
+ cd Paddle
# 建议使用git checkout切换到Paddle稳定的版本,如:
- git checkout v1.6.2
+ git checkout v1.8.4
+
+**note**: 如果您是多卡机器,建议安装NCCL;如果您是单卡机器,则可以在编译时显式指定WITH_NCCL=OFF来跳过这一步。注意如果WITH_NCCL=ON,且没有安装NCCL,则编译会报错。
+
+.. code-block:: bash
git clone https://github.com/NVIDIA/nccl.git
+ cd nccl
make -j4
make install
-**note**: 单卡机器上不会用到nccl但仍存在依赖, 后续会考虑将此依赖去除。
**Server端预测库源码编译**
-下面的代码片段配制编译选项并进行编译(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径):
+下面的代码片段配置编译选项并进行编译(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径,WITH_NCCL根据实际情况进行修改):
.. code-block:: bash
@@ -70,6 +74,7 @@ WITH_NV_JETSON OFF 在NV Jetson硬件上编译时需
-DWITH_MKL=OFF \
-DWITH_GPU=OFF \
-DON_INFER=ON \
+ -DWITH_NCCL=OFF \
..
make
make inference_lib_dist
@@ -118,7 +123,7 @@ NVIDIA Jetson是NVIDIA推出的嵌入式AI平台,Paddle Inference支持在 NVI
make inference_lib_dist -j4
3. 样例测试
- 请参照官网样例:https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_usage/deploy/inference/paddle_tensorrt_infer.html#id2
+ 请参照官网样例:https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2
**FAQ**
@@ -165,28 +170,21 @@ NVIDIA Jetson是NVIDIA推出的嵌入式AI平台,Paddle Inference支持在 NVI
│ ├── libpaddle_fluid.a
│ └── libpaddle_fluid.so
├── third_party
- │ ├── boost
- │ │ └── boost
- │ ├── eigen3
- │ │ ├── Eigen
- │ │ └── unsupported
│ └── install
│ ├── gflags
│ ├── glog
│ ├── mkldnn
│ ├── mklml
- │ ├── protobuf
- │ ├── xxhash
- │ └── zlib
+ │ └── protobuf
└── version.txt
version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如:
.. code-block:: text
- GIT COMMIT ID: cc9028b90ef50a825a722c55e5fda4b7cd26b0d6
+ GIT COMMIT ID: 0231f58e592ad9f673ac1832d8c495c8ed65d24f
WITH_MKL: ON
WITH_MKLDNN: ON
WITH_GPU: ON
- CUDA version: 8.0
+ CUDA version: 10.1
CUDNN version: v7
diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst
index 43422f0820a37149b7b9072250283984f2463503..9ed8bc9c8da226bb20dd987fc64f7070a5ba89b7 100644
--- a/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst
+++ b/doc/fluid/advanced_guide/inference_deployment/inference/build_and_install_lib_en.rst
@@ -1,23 +1,22 @@
.. _install_or_build_cpp_inference_lib_en:
-Install and Compile C++ Inference Library
+Install and Compile C++ Inference Library on Linux
=============================================
Direct Download and Installation
---------------------------------
.. csv-table:: c++ inference library list
- :header: "version description", "inference library(1.7.0 version)", "inference library(develop version)"
+ :header: "version description", "inference library(1.8.4 version)", "inference library(develop version)"
:widths: 3, 2, 2
- "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
- "ubuntu14.04_cuda9.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_",
- "ubuntu14.04_cuda10.0_cudnn7_avx_mkl_trt5", "`fluid_inference.tgz `_",
- "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_",
+ "ubuntu14.04_cpu_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cpu_avx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cpu_noavx_openblas", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda9.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda10.0_cudnn7_avx_mkl", "`fluid_inference.tgz `_", "`fluid_inference.tgz `_"
+ "ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6", "`fluid_inference.tgz `_",
+ "nv-jetson-cuda10-cudnn7.5-trt5", "`fluid_inference.tar.gz `_",
Build from Source Code
-----------------------
@@ -41,23 +40,29 @@ WITH_NV_JETSON OFF build inference libs on NV Jetson
It is recommended to configure options according to the recommended values to avoid linking unnecessary libraries. Other options can be set if it is necessary.
-Firstly we pull the latest code from github and install nccl.
+Firstly we pull the latest code from github.
.. code-block:: bash
- git clone https://github.com/paddlepaddle/paddle
- # Use git checkout to switch to stable versions such as v1.6.2
- git checkout v1.6.2
+ git clone https://github.com/paddlepaddle/Paddle
+ cd Paddle
+ # Use git checkout to switch to stable versions such as v1.8.4
+ git checkout v1.8.4
+
+
+**note**: If your environment is a multi-card machine, it is recommended to install nccl; otherwise, you can skip this step by specifying WITH_NCCL = OFF during compilation. Note that if WITH_NCCL = ON, and NCCL is not installed, the compiler will report an error.
+
+.. code-block:: bash
git clone https://github.com/NVIDIA/nccl.git
+ cd nccl
make -j4
make install
-**note**: nccl is not used but still needed in building. This dependence will be removed later.
**build inference libs on server**
-Following codes set the configurations and execute building(PADDLE_ROOT should be set to the actual installing path of inference libs).
+The following code sets the build options and performs the build (PADDLE_ROOT should be set to the actual installation path of the inference library, and WITH_NCCL should be modified according to the actual environment).
.. code-block:: bash
@@ -72,6 +77,7 @@ Following codes set the configurations and execute building(PADDLE_ROOT should b
-DWITH_MKL=OFF \
-DWITH_GPU=OFF \
-DON_INFER=ON \
+ -DWITH_NCCL=OFF \
..
make
make inference_lib_dist
@@ -121,7 +127,7 @@ NVIDIA Jetson is an AI computing platform in embedded systems introduced by NVID
make inference_lib_dist -j4
3. Test with samples
- Please refer to samples on https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_usage/deploy/inference/paddle_tensorrt_infer.html#id2
+ Please refer to samples on https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/inference_improving/paddle_tensorrt_infer.html#id2
**FAQ**
diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md b/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md
index 95c338feb4674c8eb2e6a1d0d5ea883d523b43bf..6f031621fc06570581de1c69ff3a19e015389162 100644
--- a/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md
+++ b/doc/fluid/advanced_guide/inference_deployment/inference/c_infer_cn.md
@@ -27,7 +27,7 @@ Fluid提供了高度优化的[C++预测库](./native_infer.html),为了方便
* `void PD_DisableGpu(PD_AnalysisConfig* config)`: 禁用GPU。
* `int PD_GpuDeviceId(const PD_AnalysisConfig* config)`: 返回使用的GPU设备的ID。
* `void PD_SwitchIrOptim(PD_AnalysisConfig* config, bool x)`: 设置预测是否开启IR优化。
-* `void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size, int max_batch_size, int min_subgraph_size, Precision precision, bool use_static, bool use_calib_mode)`: 开启TensorRT。关于参数的解释,详见``使用Paddle-TensorRT库预测``。
+* `void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size, int max_batch_size, int min_subgraph_size, Precision precision, bool use_static, bool use_calib_mode)`: 开启TensorRT。关于参数的解释,详见[使用Paddle-TensorRT库预测](../../performance_improving/inference_improving/paddle_tensorrt_infer.html)。
* `void PD_EnableMKLDNN(PD_AnalysisConfig* config)`: 开启MKLDNN。
#### 代码示例
diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md
index b8b051a1311938ee53f58b86c1d794f9d225e86d..8d86bca09b809bde9f779151656b824fbd7efff3 100644
--- a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md
+++ b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference.md
@@ -1,18 +1,17 @@
-安装与编译Windows预测库
+安装与编译 Windows 预测库
===========================
下载安装包与对应的测试环境
-------------
-| 版本说明 | 预测库(1.7.0版本) | 编译器 | 构建工具 | cuDNN | CUDA |
+| 版本说明 | 预测库(1.8.4版本) | 编译器 | 构建工具 | cuDNN | CUDA |
|:---------|:-------------------|:-------------------|:----------------|:--------|:-------|
-| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
-| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
-| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
-| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda10.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
+| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
+| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
+| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 |
+| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 |
+| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 |
### 硬件环境
diff --git a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md
index 480a4563b8413d219cc7028800f1625744b0837e..fc85af2ef49a32f935cb25cc7504f4e933e3a320 100644
--- a/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md
+++ b/doc/fluid/advanced_guide/inference_deployment/inference/windows_cpp_inference_en.md
@@ -5,14 +5,13 @@ Install and Compile C++ Inference Library on Windows
Direct Download and Install
-------------
-| Version | Inference Libraries(v1.7.0) | Compiler | Build tools | cuDNN | CUDA |
+| Version | Inference Libraries(v1.8.4) | Compiler | Build tools | cuDNN | CUDA |
|:---------|:-------------------|:-------------------|:----------------|:--------|:-------|
-| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
-| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
-| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
-| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda10.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.7.0/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
+| cpu_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
+| cpu_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3| CMake v3.16.0 |
+| cuda9.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 |
+| cuda9.0_cudnn7_avx_openblas | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.3.1 | 9.0 |
+| cuda10.0_cudnn7_avx_mkl | [fluid_inference.zip](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 10.0 |
### Hardware Environment
diff --git a/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md b/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md
index 66c7d3222803abbdc0723b94085f85b6daa9cb5a..e977a8d7d4e95d50a0ecad3235bb89a8fc9ef7b5 100644
--- a/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md
+++ b/doc/fluid/advanced_guide/inference_deployment/mobile/mobile_index.md
@@ -1,8 +1,8 @@
# Paddle-Lite
-Paddle Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。
+Paddle-Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。
-完整使用文档位于 [PaddleLite 文档](https://paddlepaddle.github.io/Paddle-Lite/) 。
+完整使用文档位于 [Paddle-Lite 文档](https://paddle-lite.readthedocs.io/zh/latest/) 。
## 特性
@@ -13,39 +13,39 @@ Paddle Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在
### 高性能
极致的 ARM CPU 性能优化,针对不同微架构特点实现kernel的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。
-支持INT8量化计算,结合 [PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中 INT8量化训练功能,可以提供高精度高性能的预测能力。
+支持量化模型,结合[PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中量化功能,可以提供高精度高性能的预测能力。
在Huawei NPU, FPGA上也具有很好的性能表现。
-最新 Benchmark 位于 [benchmark](https://paddlepaddle.github.io/Paddle-Lite/develop/benchmark/)。
+最新性能数据位于 [Benchmark 文档](https://paddle-lite.readthedocs.io/zh/latest/benchmark/benchmark.html)。
### 通用性
-硬件方面,Paddle Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。
+硬件方面,Paddle-Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。
-模型支持方面,Paddle Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。
+模型支持方面,Paddle-Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。
-框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过X2Paddle (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。
+框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过[X2Paddle](https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。
## 架构
-PaddleLite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。
+Paddle-Lite 的架构设计着重考虑了对多硬件和平台的支持,并且强化了多个硬件在一个模型中混合执行的能力,多个层面的性能优化处理,以及对端侧应用的轻量化设计。
![](https://github.com/Superjomn/_tmp_images/raw/master/images/paddle-lite-architecture.png)
其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。
-## Paddle-Mobile升级为Paddle Lite的说明
+## Paddle-Mobile升级为Paddle-Lite的说明
原Paddle-Mobile作为一个致力于嵌入式平台的PaddlePaddle预测引擎,已支持多种硬件平台,包括ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的GPU Metal实现、ZU5、ZU9等FPGA开发板、树莓派等arm-linux开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/mobile/README.md)
-Paddle-Mobile 整体升级重构并更名为Paddle Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。
+Paddle-Mobile 整体升级重构并更名为Paddle-Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。
metal, web的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的GPU Metal实现的需求及web前端预测需求,可以直接进入这两个目录。
## 致谢
-Paddle Lite 借鉴了以下开源项目:
+Paddle-Lite 借鉴了以下开源项目:
- [ARM compute library](https://github.com/ARM-software/ComputeLibrary)
-- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。
+- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle-Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle-Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。
## 交流与反馈
* 欢迎您通过Github Issues来提交问题、报告与建议
diff --git a/doc/fluid/advanced_guide/performance_improving/amp/amp.md b/doc/fluid/advanced_guide/performance_improving/amp/amp.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a41a447f78cf3bc119abb7754292edbbc23050a
--- /dev/null
+++ b/doc/fluid/advanced_guide/performance_improving/amp/amp.md
@@ -0,0 +1,171 @@
+# 混合精度训练最佳实践
+
+Automatic Mixed Precision (AMP) 是一种自动混合使用半精度(FP16)和单精度(FP32)来加速模型训练的技术。AMP技术可方便用户快速将使用 FP32 训练的模型修改为使用混合精度训练,并通过黑白名单和动态`loss scaling`来保证训练时的数值稳定性进而避免梯度Infinite或者NaN(Not a Number)。借力于新一代NVIDIA GPU中Tensor Cores的计算性能,PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练加速比可达1.5~2.9。
+
+### 半精度浮点类型FP16
+
+如图 1 所示,半精度(Float Precision16,FP16)是一种相对较新的浮点类型,在计算机中使用2字节(16位)存储。在IEEE 754-2008标准中,它亦被称作binary16。与计算中常用的单精度(FP32)和双精度(FP64)类型相比,FP16更适于在精度要求不高的场景中使用。
+
+
+
+ 图 1. 半精度和单精度数据示意图
+
+
+### 英伟达GPU的FP16算力
+
+在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的Volta及Turing架构GPU在使用FP16计算时具有如下特点:
+
+* FP16可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的batch size大小。
+* FP16可以充分利用英伟达Volta及Turing架构GPU提供的Tensor Cores技术。在相同的GPU硬件上,Tensor Cores的FP16计算吞吐量是FP32的8倍。
+
+### PaddlePaddle AMP功能——牛刀小试
+
+如前文所述,使用FP16数据类型可能会造成计算精度上的损失,但对深度学习领域而言,并不是所有计算都要求很高的精度,一些局部的精度损失对最终训练效果影响很微弱,却能使吞吐和训练速度带来大幅提升。因此,混合精度计算的需求应运而生。具体而言,训练过程中将一些对精度损失不敏感且能利用Tensor Cores进行加速的运算使用半精度处理,而对精度损失敏感部分依然保持FP32计算精度,用以最大限度提升访存和计算效率。
+
+为了避免对每个具体模型人工地去设计和尝试精度混合的方法,PaddlePaddle框架提供自动混合精度训练(AMP)功能,解放"炼丹师"的双手。在PaddlePaddle中使用AMP训练是一件十分容易的事情,用户只需要增加一行代码即可将原有的FP32训练转变为AMP训练。下面以`MNIST`为例介绍PaddlePaddle AMP功能的使用示例。
+
+**MNIST网络定义**
+
+```python
+import paddle.fluid as fluid
+
+def MNIST(data, class_dim):
+ conv1 = fluid.layers.conv2d(data, 16, 5, 1, act=None, data_format='NHWC')
+ bn1 = fluid.layers.batch_norm(conv1, act='relu', data_layout='NHWC')
+ pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2, data_format='NHWC')
+ conv2 = fluid.layers.conv2d(pool1, 64, 5, 1, act=None, data_format='NHWC')
+ bn2 = fluid.layers.batch_norm(conv2, act='relu', data_layout='NHWC')
+ pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2, data_format='NHWC')
+ fc1 = fluid.layers.fc(pool2, size=64, act='relu')
+ fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax')
+ return fc2
+```
+
+针对CV(Computer Vision)类模型组网,为获得更高的训练性能需要注意如下三点:
+
+* `conv2d`、`batch_norm`以及`pool2d`等需要将数据布局设置为`NHWC`,这样有助于使用TensorCore技术加速计算过程 [1]。
+* Tensor Cores要求在使用FP16加速卷积运算时conv2d的输入/输出通道数为8的倍数 [2],因此设计网络时推荐将conv2d层的输入/输出通道数设置为8的倍数。
+* Tensor Cores要求在使用FP16加速矩阵乘运算时矩阵行数和列数均为8的倍数 [3],因此设计网络时推荐将fc层的size参数设置为8的倍数。
+
+
+**FP32 训练**
+
+为了训练 MNIST 网络,还需要定义损失函数来更新权重参数,此处使用的优化器是SGDOptimizer。为了简化说明,这里省略了迭代训练的相关代码,仅体现损失函数及优化器定义相关的内容。
+
+```python
+import paddle
+import paddle.fluid as fluid
+import numpy as np
+
+data = fluid.layers.data(
+ name='image', shape=[None, 28, 28, 1], dtype='float32')
+label = fluid.layers.data(name='label', shape=[None, 1], dtype='int64')
+
+out = MNIST(data, class_dim=10)
+loss = fluid.layers.cross_entropy(input=out, label=label)
+avg_loss = fluid.layers.mean(loss)
+
+sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3)
+sgd.minimize(avg_loss)
+```
+
+**AMP训练**
+
+与FP32训练相比,用户仅需使用PaddlePaddle提供的`fluid.contrib.mixed_precision.decorate` 函数将原来的优化器SGDOptimizer进行封装,然后使用封装后的优化器(mp_sgd)更新参数梯度即可完成向AMP训练的转换,代码如下所示:
+
+```python
+sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3)
+# 此处只需要使用fluid.contrib.mixed_precision.decorate将sgd封装成AMP训练所需的
+# 优化器mp_sgd,并使用mp_sgd.minimize(avg_loss)代替原来的sgd.minimize(avg_loss)语句即可。
+mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd)
+mp_sgd.minimize(avg_loss)
+```
+
+运行上述混合精度训练python脚本时,为获得更好的执行性能,可配置如下环境变量,并保证cuDNN版本在7.4.1及以上。
+
+```shell
+export FLAGS_conv_workspace_size_limit=1024 # MB,根据所使用的GPU显存容量及模型特点设置数值,值越大越有可能选择到更快的卷积算法
+export FLAGS_cudnn_exhaustive_search=1 # 使用穷举搜索方法来选择快速卷积算法
+export FLAGS_cudnn_batchnorm_spatial_persistent=1 # 用于触发batch_norm和relu的融合
+```
+
+上述即为最简单的PaddlePaddle AMP功能使用方法。ResNet50模型的AMP训练示例可[点击此处](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E8%AE%AD%E7%BB%83)查看,其他模型使用PaddlePaddle AMP的方法也与此类似。若AMP训练过程中出现连续的loss nan等不收敛现象,可尝试使用[check nan inf工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/flags/check_nan_inf_cn.html#span-id-speed-span)进行调试。
+
+
+### PaddlePaddle AMP功能——进阶使用
+
+上一小节所述均为默认AMP训练行为,用户当然也可以改变一些默认的参数设置来满足特定的模型训练场景需求。接下来的章节将介绍PaddlePaddle AMP功能使用中用户可配置的参数行为,即进阶使用技巧。
+
+#### 自定义黑白名单
+
+PaddlePaddle AMP功能实现中根据FP16数据类型计算稳定性和加速效果在框架内部定义了算子(Op)的黑白名单。具体来说,将对FP16计算友好且能利用Tensor Cores的Op归类于白名单,将使用FP16计算会导致数值不稳定的Op归类于黑名单,将对FP16计算没有多少影响的Op归类于灰名单。然而,框架开发人员不可能考虑到所有的网络模型情况,尤其是那些特殊场景中使用到的模型。用户可以在使用`fluid.contrib.mixed_precision.decorate` 函数时通过指定自定义的黑白名单列表来改变默认的FP16计算行为。
+
+```python
+sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3)
+# list1是白名单op列表,list2是黑名单op列表,list3是黑名单var_name列表(凡是以这些黑名单var_name为输入或输出的op均会被视为黑名单op)
+amp_list = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(custom_white_list=list1, custom_black_list=list2, custom_black_varnames=list3)
+mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, amp_list)
+mp_sgd.minimize(avg_loss)
+```
+
+#### 自动loss scaling
+
+为了避免梯度Infinite或者NAN,PaddlePaddle AMP功能支持根据训练过程中梯度的数值自动调整loss scale值。用户在使用`fluid.contrib.mixed_precision.decorate` 函数时也可以改变与loss scaling相关的参数设置,示例如下:
+
+```python
+sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3)
+mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd,
+ amp_lists=None,
+ init_loss_scaling=2**8,
+ incr_every_n_steps=500,
+ decr_every_n_nan_or_inf=4,
+ incr_ratio=2.0,
+ decr_ratio=0.5,
+ use_dynamic_loss_scaling=True)
+mp_sgd.minimize(avg_loss)
+```
+
+`init_loss_scaling`、`incr_every_n_steps` 以及 `decr_every_n_nan_or_inf` 等参数控制着自动loss scaling的行为。它们仅当 `use_dynamic_loss_scaling` 设置为True时有效。下面详述这些参数的意义:
+
+* init_loss_scaling(float):初始loss scaling值。
+* incr_every_n_steps(int):每经过incr_every_n_steps个连续的正常梯度值才会增大loss scaling值。
+* decr_every_n_nan_or_inf(int):每经过decr_every_n_nan_or_inf个连续的无效梯度值(nan或者inf)才会减小loss scaling值。
+* incr_ratio(float):每次增大loss scaling值的扩增倍数,其为大于1的浮点数。
+* decr_ratio(float):每次减小loss scaling值的比例系数,其为小于1的浮点数。
+
+### 多卡GPU训练的优化
+
+PaddlePaddle AMP功能对多卡GPU训练进行了深度优化。如图 2 所示,优化之前的参数梯度更新特点:梯度计算时虽然使用的是FP16数据类型,但是不同GPU卡之间的梯度传输数据类型仍为FP32。
+
+
+
+ 图 2. 不同GPU卡之间传输梯度使用FP32数据类型(优化前)
+
+
+为了降低GPU多卡之间的梯度传输带宽,我们将梯度传输提前至`Cast`操作之前,而每个GPU卡在得到对应的FP16梯度后再执行`Cast`操作将其转变为FP32类型,具体操作详见图3。这一优化在训练大模型时对减少带宽占用尤其有效,如多卡训练BERT-Large模型。
+
+
+
+ 图 3. 不同GPU卡之间传输梯度使用FP16数据类型(优化后)
+
+
+### 训练性能对比(AMP VS FP32)
+
+PaddlePaddle AMP技术在ResNet50、Transformer等模型上相对于FP32训练均有可观的加速比,下面是ResNet50和ERNIE Large模型的AMP训练相对于FP32训练的加速效果。
+
+
+图 4. Paddle AMP训练加速效果(横坐标为卡数,如8*8代表8机8卡)
+
+
+
+
+
+
+从图4所示的图表可以看出,ResNet50的AMP训练相对于FP32训练加速比可达 $2.8\times$ 以上,而ERNIE Large的AMP训练相对于FP32训练加速比亦可达 $1.7\times$ ~ $2.1\times$ 。
+
+### 参考文献
+
+* Mixed Precision Training
+* 使用自动混合精度加速 PaddlePaddle 训练
+* [1] Tensor Layouts In Memory: NCHW vs NHWC
+* [2] Channels In And Out Requirements
+* [3] Matrix-Matrix Multiplication Requirements
diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md
deleted file mode 100644
index 0e7196c5d22cefa041dcf3661221e4b24328ef56..0000000000000000000000000000000000000000
--- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/host_memory_profiling_cn.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# 堆内存分析和优化
-
-计算机程序都可能有内存泄漏的风险。**内存泄漏**一般是由于程序在堆(heap)上分配了内存而没有释放,随着程序的运行占用的内存越来越大,一方面会影响程序的稳定性,可能让运行速度越来越慢,或者造成oom,甚至会影响运行程序的机器的稳定性,造成宕机。
-
-
-目前有很多内存泄漏分析工具,比较经典的有[valgrind](http://valgrind.org/docs/manual/quick-start.html#quick-start.intro), [gperftools](https://gperftools.github.io/gperftools/)。
-
-因为Fluid是用Python驱动C++ core来运行,valgrind直接分析非常困难,需要自己编译debug版本的、带valgrind支持的专用Python版本,而且输出的信息中大部分是Python自己的符号和调用信息,分析起来很困难,另外使用valgrind会让程序运行速度变得非常慢,所以不建议使用。
-
-本教程主要介绍[gperftools](https://gperftools.github.io/gperftools/)的使用。
-
-gperftool主要支持以下四个功能:
-
-- thread-caching malloc
-- heap-checking using tcmalloc
-- heap-profiling using tcmalloc
-- CPU profiler
-
-Paddle也提供了基于gperftool的[CPU性能分析教程](./cpu_profiling_cn.html)。
-
-对于堆内存的分析,主要用到thread-caching malloc和heap-profiling using tcmalloc。
-
-## 环境
-
-本教程基于paddle提供的Docker开发环境paddlepaddle/paddle:latest-dev,基于Ubuntu 16.04.4 LTS环境。
-
-## 使用流程
-
-- 安装google-perftools
-
-```
-apt-get install libunwind-dev
-apt-get install google-perftools
-```
-
-- 安装pprof
-
-```
-go get -u github.com/google/pprof
-```
-
-- 设置运行环境
-
-```
-export PPROF_PATH=/root/gopath/bin/pprof
-export PPROF_BINARY_PATH=/root/gopath/bin/pprof
-export LD_PRELOAD=/usr/lib/libtcmalloc.so.4
-```
-
-- 使用heap profile来运行python程序。本质上是周期性的对堆的分配情况做一次快照。
-
-```
-# HEAPPROFILE 设置生成的堆分析文件的目录和文件前缀
-# HEAP_PROFILE_ALLOCATION_INTERVAL 设置每分配多少存储dump一次dump,默认1GB
-env HEAPPROFILE="./perf_log/test.log" HEAP_PROFILE_ALLOCATION_INTERVAL=209715200 python trainer.py
-```
-
-随着程序的运行,会在perf_log这个文件夹下生成很多文件,如下:
-
-```
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0001.heap
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0002.heap
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0003.heap
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0004.heap
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0005.heap
--rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0006.heap
-```
-
-- 使用pprof对heap文件进行分析。分析有两种模式:
- - 完整模式。会对当前heap做一个分析,显示目前分配内存一些调用路径。
-
- ```
- pprof --pdf python test.log.0012.heap
- ```
- 上述命令会生成一个profile00x.pdf的文件,可以直接打开,例如:[memory_cpu_allocator](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_cpu_allocator.pdf)。从下图可以看出,在CPU版本fluid的运行过程中,分配存储最多的模块式CPUAllocator. 而别的模块相对而言分配内存较少,所以被忽略了,这对于分配内存泄漏是很不方便的,因为泄漏是一个缓慢的过程,在这种图中是无法看到的。
-
- ![result](https://user-images.githubusercontent.com/3048612/40964027-a54033e4-68dc-11e8-836a-144910c4bb8c.png)
-
- - Diff模式。可以对两个时刻的heap做diff,把一些内存分配没有发生变化的模块去掉,而把增量部分显示出来。
- ```
- pprof --pdf --base test.log.0010.heap python test.log.1045.heap
- ```
- 生成的结果为:[`memory_leak_protobuf`](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_leak_protobuf.pdf)
-
- 从图中可以看出:ProgramDesc这个结构,在两个版本之间增长了200MB+,所以这里有很大的内存泄漏的可能性,最终结果也确实证明是这里造成了泄漏。
-
- ![result](https://user-images.githubusercontent.com/3048612/40964057-b434d5e4-68dc-11e8-894b-8ab62bcf26c2.png)
- ![result](https://user-images.githubusercontent.com/3048612/40964063-b7dbee44-68dc-11e8-9719-da279f86477f.png)
diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst b/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst
index 78d5992651dd24db0b1b13115b477fa750712f79..3bb5ba2c568fc5e6c78485c3cc60a66e3a2841bf 100644
--- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst
+++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/index_cn.rst
@@ -1,3 +1,5 @@
+.. _api_guide_analysis_tools:
+
###############
性能优化分析及工具
###############
diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md
index 1d4fcee690376125579aa5eacd4f7987f6671192..e40afcf3f4cc311747de9be5cbe9eacc2ca44175 100644
--- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md
+++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_cn.md
@@ -52,7 +52,6 @@ python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=time
1. 打开chrome浏览器,访问 ,用`load`按钮来加载生成的`timeline`文件。
- ![chrome tracing](../tracing.jpeg)
1. 结果如下图所示,可以放大来查看timeline的细节信息。
diff --git a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md
index 936695b2b82f0eb461f7cd415482ca81ca882e23..fb51802a168452a0649ebbcd0a6f4d37c07ea823 100644
--- a/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md
+++ b/doc/fluid/advanced_guide/performance_improving/analysis_tools/timeline_en.md
@@ -52,7 +52,6 @@ python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=time
3. Open chrome and visit , use `load` button to load the generated `timeline` file.
- ![chrome tracing](./tracing.jpeg)
diff --git a/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md b/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md
new file mode 100644
index 0000000000000000000000000000000000000000..c20f1abf113a51632d20eb1c2340b85cd3d67aa3
--- /dev/null
+++ b/doc/fluid/advanced_guide/performance_improving/device_switching/device_switching.md
@@ -0,0 +1,199 @@
+# 运行时设备切换
+
+Paddle提供了[fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)以及[fluid.CPUPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CPUPlace_cn.html)用于指定运行时的设备。这两个接口用于指定全局的设备,从1.8版本开始,Paddle提供了[device_guard](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/fluid_cn/device_guard_cn.html)接口,用于指定部分OP的运行设备,此教程会介绍device_guard的使用场景,以及如何使用该接口对模型进行优化。
+
+如果使用了`fluid.CUDAPlace`设置了全局的执行设备,框架将尽可能地将OP设置在GPU上执行,因此有可能会遇到显存不够的情况。`device_guard`可以用于设置OP的执行设备,如果将部分层设置在CPU上运行,就能够充分利用CPU大内存的优势,避免显存超出。
+
+有时尽管指定了全局的执行设备为GPU,但框架在自动分配OP执行设备时,可能会将部分OP设置在CPU上执行。另外,个别OP会将输出存储在CPU上。在以上的场景中,常常会发生不同设备间的数据传输,可能会影响模型的性能。使用`device_guard`可以避免模型运行中不必要的数据传输。在下面的内容中,将会详细介绍如何通过[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/profiler_cn.html)工具分析数据传输开销,以及如何使用`device_guard`避免不必要的数据传输,从而提升模型性能。
+
+## 如何避免显存超出
+
+下面示例代码中的`embedding`层,其参数`size`包含两个元素,第一个元素为`vocab_size` (词表大小), 第二个为`emb_size`(`embedding`层维度)。实际场景中,词表可能会非常大。示例代码中,词表大小被设置为10000000。如果在GPU模式下运行,该层创建的权重矩阵的大小为(10000000, 150),仅这一层就需要5.59G的显存,如果词表大小继续增加,极有可能会导致显存超出。
+
+```python
+import paddle.fluid as fluid
+
+data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64')
+label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32')
+emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32')
+out = fluid.layers.l2_normalize(x=emb, axis=-1)
+
+cost = fluid.layers.square_error_cost(input=out, label=label)
+avg_cost = fluid.layers.mean(cost)
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+sgd_optimizer.minimize(avg_cost)
+
+place = fluid.CUDAPlace(0)
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost])
+```
+
+`embedding`是根据`input`中的`id`信息从`embedding`矩阵中查询对应`embedding`信息,在CPU上进行计算,其速度也是可接受的。因此,可以参考如下代码,使用`device_guard`将`embedding`层设置在CPU上,以利用CPU内存资源。那么,除了`embedding`层,其他各层都会在GPU上运行。
+
+```python
+import paddle.fluid as fluid
+
+data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64')
+label = fluid.layers.fill_constant(shape=[1, 150], value=0.5, dtype='float32')
+with fluid.device_guard("cpu"):
+ emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32')
+out = fluid.layers.l2_normalize(x=emb, axis=-1)
+
+cost = fluid.layers.square_error_cost(input=out, label=label)
+avg_cost = fluid.layers.mean(cost)
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+sgd_optimizer.minimize(avg_cost)
+
+place = fluid.CUDAPlace(0)
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost])
+```
+
+在显存足够的情况下,可不必进行这样的设置。
+
+## 如何减少数据传输
+### 使用profile工具确认是否发生了数据传输
+首先对模型的性能数据进行分析,找到发生数据传输的原因。如下列代码所示,可以利用[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/profiler_cn.html)工具进行分析。
+
+```python
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.fluid.profiler as profiler
+
+data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
+data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
+shape = fluid.layers.shape(data2)
+shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
+out = fluid.layers.crop_tensor(data1, shape=shape)
+place = fluid.CUDAPlace(0)
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+compiled_prog = compiler.CompiledProgram(fluid.default_main_program())
+with profiler.profiler('All', 'total') as prof:
+ for i in range(10):
+ result = exe.run(program=compiled_prog, fetch_list=[out])
+```
+
+在程序运行结束后,将会自动地打印出profile report。在下面的profile report中,可以看到 `GpuMemCpy Summary`中给出了2项数据传输的调用耗时。在OP执行过程中,如果输入Tensor所在的设备与OP执行的设备不同,就会发生`GpuMemcpySync`,通常我们可以直接优化的就是这一项。进一步分析,可以看到`slice`和`crop_tensor`执行中都发生了`GpuMemcpySync`。尽管我们在程序中设置了GPU模式运行,但是框架中有些OP,例如shape,会将输出结果放在CPU上。
+
+```text
+-------------------------> Profiling Report <-------------------------
+
+Note! This Report merge all thread info into one.
+Place: All
+Time unit: ms
+Sorted by total time in descending order in the same thread
+
+Total time: 26.6328
+ Computation time Total: 13.3133 Ratio: 49.9884%
+ Framework overhead Total: 13.3195 Ratio: 50.0116%
+
+------------------------- GpuMemCpy Summary -------------------------
+
+GpuMemcpy Calls: 30 Total: 1.47508 Ratio: 5.5386%
+ GpuMemcpyAsync Calls: 10 Total: 0.443514 Ratio: 1.66529%
+ GpuMemcpySync Calls: 20 Total: 1.03157 Ratio: 3.87331%
+
+------------------------- Event Summary -------------------------
+
+Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio.
+FastThreadedSSAGraphExecutorPrepare 10 9.16493 9.152509 (0.998645) 0.012417 (0.001355) 0.025192 8.85968 0.916493 0.344122
+shape 10 8.33057 8.330568 (1.000000) 0.000000 (0.000000) 0.030711 7.99849 0.833057 0.312793
+fill_constant 20 4.06097 4.024522 (0.991025) 0.036449 (0.008975) 0.075087 0.888959 0.203049 0.15248
+slice 10 1.78033 1.750439 (0.983212) 0.029888 (0.016788) 0.148503 0.290851 0.178033 0.0668471
+ GpuMemcpySync:CPU->GPU 10 0.45524 0.446312 (0.980388) 0.008928 (0.019612) 0.039089 0.060694 0.045524 0.0170932
+crop_tensor 10 1.67658 1.620542 (0.966578) 0.056034 (0.033422) 0.143906 0.258776 0.167658 0.0629515
+ GpuMemcpySync:GPU->CPU 10 0.57633 0.552906 (0.959357) 0.023424 (0.040643) 0.050657 0.076322 0.057633 0.0216398
+Fetch 10 0.919361 0.895201 (0.973721) 0.024160 (0.026279) 0.082935 0.138122 0.0919361 0.0345199
+ GpuMemcpyAsync:GPU->CPU 10 0.443514 0.419354 (0.945526) 0.024160 (0.054474) 0.040639 0.059673 0.0443514 0.0166529
+ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.341999 0.341999 (1.000000) 0.000000 (0.000000) 0.028436 0.057134 0.0341999 0.0128413
+eager_deletion 30 0.287236 0.287236 (1.000000) 0.000000 (0.000000) 0.005452 0.022696 0.00957453 0.010785
+ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.047864 0.047864 (1.000000) 0.000000 (0.000000) 0.003668 0.011592 0.0047864 0.00179718
+InitLocalVars 1 0.022981 0.022981 (1.000000) 0.000000 (0.000000) 0.022981 0.022981 0.022981 0.000862883
+```
+### 通过log查看发生数据传输的具体位置
+
+以上的示例程序比较简单,我们只用看profile report就能知道具体是哪些算子发生了数据传输。但是当模型比较复杂时,可能需要去查看更加详细的调试信息,可以打印出运行时的log去确定发生数据传输的具体位置。依然以上述程序为例,执行`GLOG_vmodule=operator=3 python test_case.py`,会得到如下log信息,会发现发生了2次数据传输:
+
+- `shape`输出的结果在CPU上,在`slice`运行时,`shape`的输出被拷贝到GPU上
+- `slice`执行完的结果在GPU上,当`crop_tensor`执行时,它会被拷贝到CPU上。
+
+```text
+I0406 14:56:23.286592 17516 operator.cc:180] CUDAPlace(0) Op(shape), inputs:{Input[fill_constant_1.tmp_0:float[1, 3, 5, 5]({})]}, outputs:{Out[shape_0.tmp_0:int[4]({})]}.
+I0406 14:56:23.286628 17516 eager_deletion_op_handle.cc:107] Erase variable fill_constant_1.tmp_0 on CUDAPlace(0)
+I0406 14:56:23.286725 17516 operator.cc:1210] Transform Variable shape_0.tmp_0 from data_type[int]:data_layout[NCHW]:place[CPUPlace]:library_type[PLAIN] to data_type[int]:data_layout[ANY_LAYOUT]:place[CUDAPlace(0)]:library_type[PLAIN]
+I0406 14:56:23.286763 17516 scope.cc:169] Create variable shape_0.tmp_0
+I0406 14:56:23.286784 17516 data_device_transform.cc:21] DeviceTransform in, src_place CPUPlace dst_place: CUDAPlace(0)
+I0406 14:56:23.286867 17516 tensor_util.cu:129] TensorCopySync 4 from CPUPlace to CUDAPlace(0)
+I0406 14:56:23.287099 17516 operator.cc:180] CUDAPlace(0) Op(slice), inputs:{EndsTensor[], EndsTensorList[], Input[shape_0.tmp_0:int[4]({})], StartsTensor[], StartsTensorList[]}, outputs:{Out[slice_0.tmp_0:int[4]({})]}.
+I0406 14:56:23.287140 17516 eager_deletion_op_handle.cc:107] Erase variable shape_0.tmp_0 on CUDAPlace(0)
+I0406 14:56:23.287220 17516 tensor_util.cu:129] TensorCopySync 4 from CUDAPlace(0) to CPUPlace
+I0406 14:56:23.287473 17516 operator.cc:180] CUDAPlace(0) Op(crop_tensor), inputs:{Offsets[], OffsetsTensor[], Shape[slice_0.tmp_0:int[4]({})], ShapeTensor[], X[fill_constant_0.tmp_0:float[1, 3, 8, 8]({})]}, outputs:{Out[crop_tensor_0.tmp_0:float[1, 3, 5, 5]({})]}.
+```
+
+### 使用device_guard避免不必要的数据传输
+
+在上面的例子中,`shape`输出的是一个1-D的Tensor,因此对于`slice`而言计算量很小。这种情况下如果将`slice`设置在CPU上运行,就可以避免2次数据传输。修改后的程序如下:
+
+```python
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.fluid.profiler as profiler
+
+data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
+data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
+shape = fluid.layers.shape(data2)
+with fluid.device_guard("cpu"):
+ shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
+out = fluid.layers.crop_tensor(data1, shape=shape)
+place = fluid.CUDAPlace(0)
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+compiled_prog = compiler.CompiledProgram(fluid.default_main_program())
+with profiler.profiler('All', 'total') as prof:
+ for i in range(10):
+ result = exe.run(program=compiled_prog, fetch_list=[out])
+```
+再次观察profile report中`GpuMemCpy Summary`的内容,可以看到`GpuMemcpySync`已经被消除。在实际的模型中,若`GpuMemcpySync`调用耗时占比较大,并且可以通过设置`device_guard`避免,那么就能够带来一定的性能提升。
+
+```text
+-------------------------> Profiling Report <-------------------------
+
+Note! This Report merge all thread info into one.
+Place: All
+Time unit: ms
+Sorted by total time in descending order in the same thread
+
+Total time: 14.5345
+ Computation time Total: 4.47587 Ratio: 30.7948%
+ Framework overhead Total: 10.0586 Ratio: 69.2052%
+
+------------------------- GpuMemCpy Summary -------------------------
+
+GpuMemcpy Calls: 10 Total: 0.457033 Ratio: 3.14447%
+ GpuMemcpyAsync Calls: 10 Total: 0.457033 Ratio: 3.14447%
+
+------------------------- Event Summary -------------------------
+
+Event Calls Total CPU Time (Ratio) GPU Time (Ratio) Min. Max. Ave. Ratio.
+FastThreadedSSAGraphExecutorPrepare 10 7.70113 7.689066 (0.998433) 0.012064 (0.001567) 0.032657 7.39363 0.770113 0.529852
+fill_constant 20 2.62299 2.587022 (0.986287) 0.035968 (0.013713) 0.071097 0.342082 0.13115 0.180466
+shape 10 1.93504 1.935040 (1.000000) 0.000000 (0.000000) 0.026774 1.6016 0.193504 0.133134
+Fetch 10 0.880496 0.858512 (0.975032) 0.021984 (0.024968) 0.07392 0.140896 0.0880496 0.0605797
+ GpuMemcpyAsync:GPU->CPU 10 0.457033 0.435049 (0.951898) 0.021984 (0.048102) 0.037836 0.071424 0.0457033 0.0314447
+crop_tensor 10 0.705426 0.671506 (0.951916) 0.033920 (0.048084) 0.05841 0.123901 0.0705426 0.0485346
+slice 10 0.324241 0.324241 (1.000000) 0.000000 (0.000000) 0.024299 0.07213 0.0324241 0.0223084
+eager_deletion 30 0.250524 0.250524 (1.000000) 0.000000 (0.000000) 0.004171 0.016235 0.0083508 0.0172365
+ScopeBufferedMonitor::post_local_exec_scopes_process 10 0.047794 0.047794 (1.000000) 0.000000 (0.000000) 0.003344 0.014131 0.0047794 0.00328831
+InitLocalVars 1 0.034629 0.034629 (1.000000) 0.000000 (0.000000) 0.034629 0.034629 0.034629 0.00238254
+ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.032231 0.032231 (1.000000) 0.000000 (0.000000) 0.002952 0.004076 0.0032231 0.00221755
+```
+
+### 总结
+
+- 使用profile工具对模型进行分析,看是否存在GpuMemcpySync的调用耗时。若存在,则进一步分析发生数据传输的原因。
+- 可以通过profile report找到发生GpuMemcpySync的OP。如果需要,可以通过打印log,找到GpuMemcpySync发生的具体位置。
+- 尝试使用`device_guard`设置部分OP的运行设备,来减少GpuMemcpySync的调用。
+- 最后可以通过比较修改前后模型的profile report,或者其他用来衡量性能的指标,确认修改后是否带来了性能提升。
diff --git a/doc/fluid/advanced_guide/performance_improving/index_cn.rst b/doc/fluid/advanced_guide/performance_improving/index_cn.rst
index 9103496255d6637161065237ac53a856f033a835..b50f091f8c70328d37c7cf3dc92a5b0f14a08f33 100644
--- a/doc/fluid/advanced_guide/performance_improving/index_cn.rst
+++ b/doc/fluid/advanced_guide/performance_improving/index_cn.rst
@@ -7,6 +7,8 @@
singlenode_training_improving/training_best_practice.rst
singlenode_training_improving/memory_optimize.rst
+ device_switching/device_switching.md
+ amp/amp.md
multinode_training_improving/cpu_train_best_practice.rst
multinode_training_improving/dist_training_gpu.rst
multinode_training_improving/gpu_training_with_recompute.rst
diff --git a/doc/fluid/advanced_guide/performance_improving/index_en.rst b/doc/fluid/advanced_guide/performance_improving/index_en.rst
index 30d74b04013e56cb84f790c4ba265bad2f4a3d17..f57e2a3d060daabf6733c969a9e85de69bc5ae24 100644
--- a/doc/fluid/advanced_guide/performance_improving/index_en.rst
+++ b/doc/fluid/advanced_guide/performance_improving/index_en.rst
@@ -5,7 +5,7 @@ Practice Improving
.. toctree::
:maxdepth: 1
-
+ singlenode_training_improving/memory_optimize_en.rst
multinode_training_improving/cpu_train_best_practice_en.rst
multinode_training_improving/gpu_training_with_recompute_en.rst
inference_improving/paddle_tensorrt_infer_en.md
diff --git a/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst b/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst
index 7f9ed763122d872fb9fa0ddc46cd6492f4b0b31a..ebe02dc6f577f47f323da7d0a967d952f0e5124e 100644
--- a/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst
+++ b/doc/fluid/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst
@@ -7,34 +7,39 @@
开始优化您的GPU分布式训练任务
---------------------------
-PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。
-通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时,
-检查每项优化点并验证对应提升,从而提升最终的性能。
+PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。通常可以通过以下方法优化多机多卡环境下的训练性能,建议在进行性能优化时,检查每项优化点并验证对应提升,从而提升最终的性能。
-一个简单的验证当前的训练程序是否需要进一步优化性能的方法,
-是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。
-如果GPU利用率较低,则可能存在较大的优化空间。
-下面主要从环境变量设置、训练策略设置、数据准备和训练方式四个方向介绍GPU分布式训练中常用的方法。
+一个简单的验证当前的训练程序是否需要进一步优化性能的方法,是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。如果GPU利用率较低,则可能存在较大的优化空间。下面主要从数据准备、训练策略设置和训练方式三个方面介绍GPU分布式训练中常用的优化方法。
-1、环境变量设置
-=============
+1、数据准备
+===========
-环境变量设置表
+数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高的要求,优化reader性能需要考虑的点包括:
-.. csv-table::
- :header: "调节项", "可选值", "说明"
- :widths: 3, 3, 5
+ - 使用 :code:`DataLoader` 。参考 `这里 `_ 使用DataLoader,并建议开启 :code:`use_double_buffer` 。
+ - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练可以提升数据读取效率。
+ - 减少reader初始化时间 (infinite read)。在训练任务开始执行第一轮训练时,reader开始不断异步地从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。如果每轮训练都重新填充队列,会产生一些时间开销。因此,在使用DataLoader时,可以让reader函数不断地产生数据,直到训练循环结束:
- ":code:`FLAGS_sync_nccl_allreduce`", "0,1", "是否同步AllReduce操作。1表示开启,每次调用等待AllReduce同步"
- ":code:`FLAGS_fraction_of_gpu_memory_to_use`", "0~1之间的float值", "预先分配显存的占比"
- ":code:`NCCL_IB_DISABLE` ", "0,1", "是否启用RDMA多机通信。如果机器硬件支持,可以设置1,开启RDMA支持"
+ .. code-block:: python
+ :linenos:
-说明:
+ def infinite_reader(file_path):
+ while True:
+ with open(file_path) as fn:
+ for line in fn:
+ yield process(line)
+
+ def train():
+ ...
+ for pass_id in range(NUM_PASSES):
+ if pass_id == 0:
+ data_loader.start()
+ for batch_id in range(iters_per_pass):
+ exe.run()
+ data_loader.reset()
-- 关于 :code:`FLAGS_sync_nccl_allreduce` ,配置 :code:`FLAGS_sync_nccl_allreduce=1` 让每次allreduce操作都等待完成,可以提升性能,详细原因和分析可以参考:https://github.com/PaddlePaddle/Paddle/issues/15049。
-- 关于 :code:`FLAGS_fraction_of_gpu_memory_to_use` ,配置 :code:`FLAGS_fraction_of_gpu_memory_to_use=0.95` ,0.95是指95%的显存会预先分配。设置的范围是0.0~1.0。注意,设置成0.0会让每次显存分配都调用 :code:`cudaMalloc` 这样会极大的降低训练性能。
-- 关于 :code:`NCCL_IB_DISABLE` ,在使用NCCL2模式训练时,其会默认尝试开启RDMA通信,如果系统不支持,则会自动降级为使用TCP通信。可以通过打开环境变量 :code:`NCCL_DEBUG=INFO` 查看NCCL是否选择了开启RDMA通信。如果需要强制使用TCP方式通信,可以设置 :code:`NCCL_IB_DISABLE=1` 。
+另外,可以使用DALI库提升数据处理性能。DALI是NVIDIA开发的数据加载库,更多内容请参考 `官网文档 `_ 。飞桨中如何结合使用DALI库请参考 `使用示例 `_ 。
2、训练策略设置
===========
@@ -48,9 +53,11 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性
":code:`num_threads`", "int", "1", "CPU线程数"
":code:`nccl_comm_num`", "int", "1", "nccl通信器数量"
":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将AllReduce操纵进行融合"
- ":code:`use_hierarchical_allreduce` ", "bool", "False","分级式reduce"
+ ":code:`use_hierarchical_allreduce` ", "bool", "False", "分级式reduce"
":code:`num_iteration_per_drop_scope`", "int", "1", "scope drop频率,设置每隔几个batch的迭代之后执行一次清理scope"
":code:`fetch_frequency`", "int", "1", "fetch的刷新频率"
+ ":code:`fuse_bn_act_ops`", "bool", "False", "是否开启batch normalization和激活函数的融合"
+ ":code:`fuse_elewise_add_act_ops`", "bool", "False", "是否开启elementwise add函数和激活函数的融合"
说明:
@@ -58,7 +65,7 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性
- 关于AllReduce融合 :code:`fuse_all_reduce_ops` ,默认情况下会将同一layer中参数的梯度的AllReduce操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次AllReduce操作,现在只用一次AllReduce 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 和 :code:`FLAGS_fuse_parameter_groups_size` 两个环境变量选项。用户可以指定融合AllReduce操作之后,每个AllReduce操作的梯度字节数,比如希望每次AllReduce调用传输16MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=16` ,经验值为总通信量的十分之一。可以指定每次AllReduce操作的最大层数,即到达该层数就进行AllReduce,如指定50层 :code:`export FLAGS_fuse_parameter_groups_size=50` 。注意:目前不支持sparse参数梯度。
- 关于使用分级式reduce :code:`use_hierarchical_allreduce` 。对于多机模式,针对小数据量的通信,Ring AllReduce通信效率低,采用Hierarchical AllReduce可以解决该问题。
- 关于降低scope drop频率 :code:`num_iteration_per_drop_scope` 和fetch频率 :code:`fetch_frequency` 。减少scope drop和fetch频率,可以减少频繁的变量内存申请、释放和拷贝,从而提升性能。
-- 其他训练策略的参数可以参考 `这里 <../best_practice/training_best_practice.html>`_ 。
+- 关于操作融合:通过将多个细粒度的操作融合为一个操作(例如batch normalization与激活函数的融合),可以减少Op调度和访存的开销,从而提升训练性能。
设置这些参数可以参考:
@@ -88,67 +95,12 @@ PaddlePaddle Fluid可以支持在现代GPU [#]_ 服务器集群上完成高性
exe.run([])
-3、数据准备
-===========
-
-1、使用GPU完成部分图片预处理
-
-如果可能,使用GPU完成部分数据预处理,比如图片Tensor的归一化:
-
-.. code-block:: python
- :linenos:
-
- image = fluid.layers.data()
- img_mean = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_mean", persistable=True)
- img_std = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_std", persistable=True)
- t1 = fluid.layers.elementwise_sub(image / 255.0, img_mean, axis=1)
- image = fluid.layers.elementwise_div(t1, img_std, axis=1)
-
-对输入的图片Tensor,使用 :code:`fluid.layers` 完成图片数据归一化预处理,
-这样可以减轻CPU预处理数据的负担,提升总体训练速度。
-
-2、优化reader性能
-
-数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高对要求,
-优化reader性能需要考虑的点包括:
-
- - 使用 :code:`pyreader` 。参考 `这里 <../../user_guides/howto/prepare_data/use_py_reader.html>`_ 使用pyreader,并开启 :code:`use_double_buffer` 。
- - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练
- - 减少reader初始化时间 (infinite read)
- 在训练任务开始执行第一轮训练时,reader开始异步的,不断的从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据
- 填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练
- 都重新填充队列,会产生一些时间的开销。所以,在使用pyreader时,可以让reader函数不断的产生数据,直到训练循环手动break:
-
- .. code-block:: python
- :linenos:
-
- def infinite_reader(file_path):
- while True:
- with open(file_path) as fn:
- for line in fn:
- yield process(line)
-
- def train():
- ...
- for pass_id in xrange(NUM_PASSES):
- if pass_id == 0:
- pyreader.start()
- for batch_id in (iters_per_pass):
- exe.run()
- pyreader.reset()
-
-4、训练方式
+3、训练方式
===========
1、Local SGD
-GPU多机多卡同步训练过程中存在慢trainer现象,
-即每步中训练快的trainer的同步通信需要等待训练慢的trainer。
-由于每步中慢trainer的rank具有随机性,
-因此我们使用局部异步训练的方式——LocalSGD,
-通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,
-从而提升同步训练性能。
-Local SGD训练方式主要有三个参数,分别是:
+GPU多机多卡同步训练过程中存在慢trainer现象,即每步中训练快的trainer的同步通信需要等待训练慢的trainer。由于每步中慢trainer的rank具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,从而提升同步训练性能。Local SGD训练方式主要有三个参数,分别是:
.. csv-table::
:header: "选项", "类型", "可选值", "说明"
@@ -163,18 +115,14 @@ Local SGD训练方式主要有三个参数,分别是:
+ - Local SGD的warmup步长 :code:`local_sgd_is_warm_steps` 影响最终模型的泛化能力,一般需要等到模型参数稳定之后再进行Local SGD训练,经验值可以取学习率第一次下降时的epoch作为warmup步长。
+ - Local SGD步长 :code:`local_sgd_steps` ,一般该值越大,通信次数越少,训练速度越快,但随之而来的是模型精度的下降。经验值设置为2或者4。
-具体的Local SGD的训练代码可以参考:
-https://github.com/PaddlePaddle/Fleet/tree/develop/examples/local_sgd/resnet
+具体的Local SGD的训练代码可以参考:https://github.com/PaddlePaddle/Fleet/tree/develop/examples/local_sgd/resnet
2、使用混合精度训练
-V100 GPU提供了 `Tensor Core `_ 可以在混合精度计算
-场景极大的提升性能。使用混合精度计算的例子可以参考:
-https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training
+V100 GPU提供了 `Tensor Core `_ ,可以在混合精度计算场景下极大地提升性能。使用混合精度计算的例子可以参考:https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training
-目前Paddle只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持static loss scaling,其他模型使用混合精度也
-可以参考以上的实现完成验证。
+目前Paddle提供了两个模型(ResNet、BERT)的混合精度计算实现,并支持static loss scaling,其他模型使用混合精度时也可以参考以上实现完成验证。
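+
+下面给出一个开启混合精度训练的简要示意(以 :code:`fluid.contrib.mixed_precision.decorate` 接口为例,仅作参考,具体接口与参数请以所用版本的官方文档为准;其中 :code:`avg_loss` 代表模型的损失):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    optimizer = fluid.optimizer.Momentum(learning_rate=0.1, momentum=0.9)
+    # 使用混合精度装饰原始optimizer,内部自动完成float16转换与loss scaling
+    mp_optimizer = fluid.contrib.mixed_precision.decorate(
+        optimizer, init_loss_scaling=128.0, use_dynamic_loss_scaling=True)
+    mp_optimizer.minimize(avg_loss)
+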
附录
----
diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst
index 5875f94b2d436aec41b531004799b70bf017f463..ae9238813a614c7e3c022e06c59995e21f589c10 100644
--- a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst
+++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.rst
@@ -7,20 +7,26 @@
1. PaddlePaddle的显存分配策略
===========================
-1.1. 显存预分配策略
-----------------
+1.1. 显存自增长AutoGrowth策略
+--------------------------
+自1.6+的版本起,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存,且已于1.7+版本中默认开启,方便用户在同一张GPU卡上同时运行多个任务。
-由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。因此PaddlePaddle采用了显存预分配的策略加速显存分配。具体方式为:
+由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。
+因此显存自增长AutoGrowth策略会缓存已分配到的显存,供后续分配使用,具体方式为:
-- 在分配requested_size大小的显存时,
- - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。
- - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。
+- 在前几次显存分配时,框架会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。
-- 在释放free_size大小的显存时,
- - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。
- - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。
+- 在随后的显存分配时,框架会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。
+
+因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。
-上述的chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为:
+1.2. 显存预分配策略
+----------------
+
+除了显存自增长AutoGrowth策略以外,PaddlePaddle还提供了显存预分配策略。显存预分配策略是PaddlePaddle 1.7版本前的默认显存分配策略。
+
+显存预分配策略会在第一次分配时分配很大chunk_size的显存块,随后的显存分配大多从预分配的显存块中切分获得。
+其中,chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为:
.. code-block:: python
@@ -28,7 +34,17 @@
:code:`FLAGS_fraction_of_gpu_memory_to_use` 的默认值为0.92,即框架预先分配显卡92%的当前可用显存值。
-若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的chunk,例如:
+显存预分配策略分配显存的具体方式为:
+
+- 在分配requested_size大小的显存时,
+ - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。
+ - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。
+
+- 在释放free_size大小的显存时,
+ - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。
+ - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。
+
+若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的显存块,例如:
.. code-block:: shell
@@ -37,30 +53,23 @@
若 :code:`FLAGS_fraction_of_gpu_memory_to_use` 设为0,则每次显存分配和释放均会调用 :code:`cudaMalloc` 和 :code:`cudaFree` ,会严重影响性能,不建议你使用。
只有当你想测量网络的实际显存占用量时,你可以设置 :code:`FLAGS_fraction_of_gpu_memory_to_use` 为0,观察nvidia-smi显示的显存占用情况。
-1.2. 显存自增长AutoGrowth策略
---------------------------
-在1.6+的版本中,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存。若您希望按需分配显存,您可选择使用显存自增长AutoGrowth策略。
-
-在前几次显存分配时,会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。
+1.3. 显存分配策略的选择方式
+-----------------------
+自1.6+版本起,PaddlePaddle同时支持显存自增长AutoGrowth策略和显存预分配策略,并通过环境变量 :code:`FLAGS_allocator_strategy` 控制。
-在随后的显存分配时,会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。
-
-因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。
-
-显存自增长AutoGrowth策略通过设置环境变量 :code:`FLAGS_allocator_strategy` 开启,设置方式为:
+选择显存自增长AutoGrowth的方式为:
.. code-block:: shell
- export FLAGS_allocator_strategy=auto_growth
+ export FLAGS_allocator_strategy=auto_growth # 选择显存自增长AutoGrowth策略
-对应地,显存预分配策略通过以下方法开启:
+选择显存预分配策略的方式为:
.. code-block:: shell
- export FLAGS_allocator_strategy=naive_best_fit
-
-环境变量 :code:`FLAGS_allocator_strategy` 的默认值为naive_best_fit,表示默认使用显存预分配策略。
+ export FLAGS_allocator_strategy=naive_best_fit # 选择显存预分配策略
+此外,自1.7.2+版本起,PaddlePaddle提供了环境变量 :code:`FLAGS_gpu_memory_limit_mb` ,用于控制单个任务进程可分配的最大显存,单位是MB。默认值是0,表示没有限制,可分配全部显存。如果设置为大于0的值,则会在分配的显存超过限制时报错,即使此时系统还存在空闲的显存空间。
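+
+设置方式示例如下(其中4096仅为示例值,表示将该进程可分配的显存上限设为4GB,请按实际需求调整):
+
+.. code-block:: shell
+
+    export FLAGS_gpu_memory_limit_mb=4096
+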
2. PaddlePaddle的存储优化策略
===========================
diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a1e3ecb0c52cc198c8e6c6e54f8a175222434a6
--- /dev/null
+++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize_en.rst
@@ -0,0 +1,178 @@
+.. _api_guide_memory_optimize_en:
+
+##################################
+Memory Allocation and Optimization
+##################################
+
+1. Memory Allocation Strategy
+==============================
+
+1.1. AutoGrowth Strategy
+--------------------------
+
+Since version 1.6+, PaddlePaddle supports the AutoGrowth strategy, which allocates memory on demand.
+The AutoGrowth strategy has been enabled by default since version 1.7+, making it convenient for users to
+run multiple tasks on the same GPU card at the same time.
+
+Because the native CUDA system calls :code:`cudaMalloc` and :code:`cudaFree` are synchronous operations
+and therefore very time-consuming, the AutoGrowth strategy caches allocated memory for subsequent allocations.
+It works as follows:
+
+- In the first few memory allocations, the PaddlePaddle framework calls :code:`cudaMalloc` to allocate memory on demand. When the allocated memory is released, the framework does not call :code:`cudaFree` to return it to the GPU, but caches it inside the framework.
+
+- In subsequent allocations, the framework first checks whether there is a suitable block (a block larger than the required size) in the cached memory. If there is, it splits the required memory from that block and returns it. Otherwise, it calls :code:`cudaMalloc` to allocate memory from the GPU. Released memory is also cached for subsequent allocations.
+
+Therefore, the AutoGrowth strategy may slow down the first few batches of model training (because of the frequent
+:code:`cudaMalloc` calls), but it will not affect the speed of the subsequent training process.
+
+1.2. Pre-Allocation Strategy
+------------------------------
+
+In addition to the AutoGrowth strategy, PaddlePaddle also provides a Pre-Allocation strategy,
+which was the default memory allocation strategy before PaddlePaddle 1.7.
+
+The Pre-Allocation strategy allocates a large chunk at the first allocation, and most subsequent memory allocations are served from this pre-allocated chunk.
+The chunk size is determined by the environment variable :code:`FLAGS_fraction_of_gpu_memory_to_use`, and is calculated as:
+
+.. code-block:: python
+
+    chunk_size = FLAGS_fraction_of_gpu_memory_to_use * amount of currently available memory on a single GPU card
+
+The default value of :code:`FLAGS_fraction_of_gpu_memory_to_use` is 0.92, which means the framework pre-allocates
+92% of the currently available memory of the GPU card.
+
+The Pre-Allocation strategy allocates GPU memory as follows:
+
+- When allocating memory of requested_size,
+ - If requested_size <= chunk_size, the framework will first allocate a memory chunk of chunk_size, then split a block of requested_size and return the block. Every subsequent memory allocation will be performed on the chunk.
+ - If requested_size > chunk_size, the framework will call :code:`cudaMalloc` to allocate memory block of requested_size and return.
+
+- When freeing memory of free_size,
+ - If free_size <= chunk_size, the framework will put the memory block back into the pre-allocated chunk, instead of returning back to GPU.
+ - If free_size > chunk_size, the framework will call :code:`cudaFree` and return the memory back to GPU.
+
+If other tasks on your GPU card occupy memory, you can decrease :code:`FLAGS_fraction_of_gpu_memory_to_use` appropriately
+to ensure that the framework can pre-allocate a memory chunk of suitable size, for example:
+
+.. code-block:: shell
+
+ export FLAGS_fraction_of_gpu_memory_to_use=0.4 # Pre-allocate 40% memory of a single GPU card
+
+If :code:`FLAGS_fraction_of_gpu_memory_to_use` is set to 0, the framework calls :code:`cudaMalloc` and :code:`cudaFree` every time memory is allocated and released, which seriously hurts performance and is not recommended. Only when you want to measure the actual memory usage of the network should you set :code:`FLAGS_fraction_of_gpu_memory_to_use` to 0 and observe the memory usage reported by nvidia-smi.
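+
+For example, a minimal sketch for such a measurement (:code:`train.py` below is a placeholder for your own training script):
+
+.. code-block:: shell
+
+    export FLAGS_fraction_of_gpu_memory_to_use=0  # for measurement only, not for normal training
+    python train.py &
+    nvidia-smi                                    # observe the actual memory usage of the process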
+
+1.3. Configuration of memory allocation strategy
+--------------------------------------------------
+Since version 1.6+, PaddlePaddle supports both the AutoGrowth strategy and the Pre-Allocation strategy. The strategy used by
+the framework is controlled by the environment variable :code:`FLAGS_allocator_strategy`.
+
+Use AutoGrowth strategy:
+
+.. code-block:: shell
+
+ export FLAGS_allocator_strategy=auto_growth # Use AutoGrowth strategy
+
+Use Pre-Allocation strategy:
+
+.. code-block:: shell
+
+ export FLAGS_allocator_strategy=naive_best_fit # Use Pre-Allocation strategy
+
+In addition, since version 1.7.2+, PaddlePaddle provides the environment variable :code:`FLAGS_gpu_memory_limit_mb`, which limits the maximum amount of GPU memory (in MB) that the process can allocate.
+The default value is 0, which means there is no limit and all GPU memory is available to the process. If it is larger than 0, the process raises an out-of-memory error once the allocated
+memory exceeds the limit, even if there is still free memory on the GPU card.
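+
+For example (4096 below is just an illustrative value, limiting the process to roughly 4 GB of GPU memory):
+
+.. code-block:: shell
+
+    export FLAGS_gpu_memory_limit_mb=4096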
+
+2. Memory Optimization Strategy
+================================
+
+PaddlePaddle provides several general memory optimization methods to reduce the memory usage of your network (including both host memory and GPU memory).
+
+2.1. GC Strategy: memory garbage eager collection
+--------------------------------------------------
+
+The principle of GC (Garbage Collection) is to eagerly release the memory of variables that are no longer needed while the network is running,
+in order to save memory. GC is suitable for training and inference using Executor or ParallelExecutor, but it is not suitable for the C++ inference library.
+
+**Since version 1.6+, GC Strategy is enabled by default.**
+
+GC Strategy is controlled by 3 environment variables:
+
+
+- :code:`FLAGS_eager_delete_tensor_gb`
+
+This variable enables GC; its data type is double. The default value is -1 in PaddlePaddle versions < 1.6,
+and 0 in versions >= 1.6. The GC Strategy caches a certain amount of memory garbage and releases it all at once.
+:code:`FLAGS_eager_delete_tensor_gb` is the threshold of cached memory garbage, in GB. **It is recommended to set** :code:`FLAGS_eager_delete_tensor_gb=0`.
+
+If :code:`FLAGS_eager_delete_tensor_gb=0`, once there is memory garbage, it will be collected immediately to save memory.
+
+If :code:`FLAGS_eager_delete_tensor_gb=1`, the memory garbage is collected when the cached amount of garbage reaches 1GB.
+
+If :code:`FLAGS_eager_delete_tensor_gb<0`, GC Strategy is disabled.
+
+
+- :code:`FLAGS_memory_fraction_of_eager_deletion`
+
+This variable controls the GC Strategy; its data type is double, with default value 1 and range [0, 1]. It only applies to ParallelExecutor or CompiledProgram+with_data_parallel.
+GC sorts the variables in descending order of the memory space they occupy,
+and only collects the memory space of the top :code:`FLAGS_memory_fraction_of_eager_deletion` fraction of variables.
+**It is recommended to keep the default value**, that is, :code:`FLAGS_memory_fraction_of_eager_deletion=1`.
+
+If :code:`FLAGS_memory_fraction_of_eager_deletion=0.6`, top 60% variables will be collected.
+
+If :code:`FLAGS_memory_fraction_of_eager_deletion=0`, no variable will be collected, GC Strategy is disabled.
+
+If :code:`FLAGS_memory_fraction_of_eager_deletion=1`, all variables will be collected.
+
+
+- :code:`FLAGS_fast_eager_deletion_mode`
+
+This variable enables the fast GC Strategy; its type is bool. The default value is True, which means the fast GC Strategy is used.
+The fast GC Strategy collects memory garbage immediately instead of waiting for the CUDA kernel to finish. **It is recommended to keep the default value**, that is, :code:`FLAGS_fast_eager_deletion_mode=True`.
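+
+For reference, a minimal sketch that applies the recommended (and default) settings described above:
+
+.. code-block:: shell
+
+    export FLAGS_eager_delete_tensor_gb=0              # collect memory garbage immediately
+    export FLAGS_memory_fraction_of_eager_deletion=1   # collect all collectable variables
+    export FLAGS_fast_eager_deletion_mode=True         # do not wait for CUDA kernels to finish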
+
+
+2.2. Inplace Strategy: output reuses input inside operator
+------------------------------------------------------------
+
+The principle of the Inplace strategy is that the output of some operators can reuse the memory space of their input.
+For example, the output and input of the :code:`reshape` operator can share the same memory space.
+
+The Inplace Strategy is suitable for ParallelExecutor or CompiledProgram+with_data_parallel and can be enabled through :code:`BuildStrategy`.
+The strategy is not suitable for Executor+Program or the C++ inference library.
+
+**Since version 1.6+, Inplace Strategy is enabled by default.**
+
+The Inplace strategy can be enabled as follows:
+
+.. code-block:: python
+
+ build_strategy = fluid.BuildStrategy()
+ build_strategy.enable_inplace = True # Enable Inplace Strategy
+
+ compiled_program = fluid.CompiledProgram(train_program).with_data_parallel(
+     loss_name=loss.name, build_strategy=build_strategy)
+
+
+In PaddlePaddle versions < 1.6, due to some design issues, when the Inplace Strategy is enabled,
+the variables in the fetch_list of the subsequent :code:`exe.run` must be persistable.
+That is, if the variables you want to fetch are loss and acc, you must set:
+
+.. code-block:: python
+
+ loss.persistable = True
+ acc.persistable = True
+
+
+**Since version 1.6+, setting variables in fetch_list to persistable is not needed.**
+
+
+3. Memory Optimization Best Practice
+======================================
+
+The recommended memory optimization strategy is as follows:
+
+- Enable the GC strategy: set :code:`FLAGS_eager_delete_tensor_gb=0`.
+
+- Enable the Inplace strategy: set :code:`build_strategy.enable_inplace = True`, and set the variables in fetch_list to persistable using :code:`var.persistable = True` when the PaddlePaddle version is < 1.6.
+
+**Since version 1.6+, the above optimization strategies have been enabled by default, and setting variables in fetch_list to persistable is no longer needed.**
+
diff --git a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst
index 7a8e2573ec7b77e0335b24d042a11edfbd2098ed..95e71abd70e3605f94f3faa0aa1367db499b022b 100644
--- a/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst
+++ b/doc/fluid/advanced_guide/performance_improving/singlenode_training_improving/training_best_practice.rst
@@ -1,4 +1,5 @@
-.. training_best_practice:
+.. _api_guide_singlenode_training_best_practice:
+
#####################
单机训练优秀实践
@@ -13,7 +14,7 @@ PaddlePaddle Fluid可以支持在现代CPU、GPU平台上进行训练。如果
1. 网络构建过程中的配置优化
-=============
+==================
这部分优化与具体的模型有关,在这里,我们列举出一些优化过程中遇到过的一些示例。
@@ -35,93 +36,145 @@ cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经
bias_attr=None,
use_cudnn=True,
act=None,
- name=None)
+ name=None,
+ data_format="NCHW")
在 :code:`use_cudnn=True` 时,框架底层调用的是cuDNN中的卷积操作。
通常cuDNN库提供的操作具有很好的性能表现,其性能明显优于Paddle原生的CUDA实现,比如 :code:`conv2d` 。但是cuDNN中有些操作的性能较差,比如: :code:`conv2d_transpose` 在 :code:`batch_size=1` 时、:code:`pool2d` 在 :code:`global_pooling=True` 时等,这些情况下,cuDNN实现的性能差于Paddle的CUDA实现,建议手动设置 :code:`use_cudnn=False` 。
-1.2 使用融合功能的API
-^^^^^^^^^^^^^^^^
+1.2 减少模型中Layer的个数
+^^^^^^^^^^^^^^^^^^
+
+为方便用户使用,飞桨提供了一些不同粒度的Layer,其中某些Layer的组合可以通过单个Layer直接完成。比如:
-Paddle提供一些粗粒度的API,这些API融合了多个细粒度API的计算,比如:
+(1) :code:`fluid.layers.softmax_with_cross_entropy` ,该操作其实是 :code:`fluid.layers.softmax` 和 :code:`fluid.layers.cross_entropy` 的组合,因此如果模型中出现
.. code-block:: python
logits = fluid.layers.softmax(logits)
loss = fluid.layers.cross_entropy(logits, label, ignore_index=255)
-和
+可以直接替换成
.. code-block:: python
loss = fluid.layers.softmax_with_cross_entropy(logits, label, ignore_index=255, numeric_stable_mode=True)
-用户网络配置中使用融合功能的API,通常能取得更好的计算性能。
+
+(2) 如果模型中需要对数据进行标准化,可以直接使用 :code:`fluid.layers.data_norm` ,而不用通过一系列layer组合出数据的标准化操作。
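+
+例如,下面是一个使用 :code:`fluid.layers.data_norm` 的简要示意(其中输入的shape仅为示例,具体参数请参考API文档):
+
+.. code-block:: python
+
+    x = fluid.data(name="x", shape=[None, 32], dtype="float32")
+    # 直接使用data_norm完成数据标准化,无需手动组合多个细粒度Layer
+    y = fluid.layers.data_norm(input=x)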
+
+因此,建议在构建模型时优先使用飞桨提供的单个Layer完成所需操作,这样可以减少模型中Layer的个数,从而加速模型训练。
+
2. 数据准备优化
=============
-2.1 分析数据准备部分的耗时
-^^^^^^^^^^^^^^^^
+数据准备通常分为两部分:第一部分是数据加载,即程序从磁盘中加载训练/预测数据;第二部分是数据预处理,程序对加载的数据进行预处理,比如图像任务通常需要进行数据增强、Shuffle等。
+这两部分需要用户根据自己的模型需要进行设置,只需要最后得到Data Reader接口即可。Data Reader返回iterable对象,可以每次返回一条样本或者一组样本。代码示例如下:
-数据准备部分通常分为两个部分:数据读取部分和预处理部分。
+.. code-block:: python
+
+    import numpy as np
+
+    def data_reader(width, height):
+        def reader():
+            while True:
+                # 随机生成一条样本:特征 + 类别标签
+                yield np.random.uniform(-1, 1, size=width*height), np.random.randint(0, 10)
+        return reader
+
+    train_data_reader = data_reader(32, 32)
-- 数据读取部分:用户需要在Python端从磁盘中加载数据,然后将数据feed到Fluid的执行器中。
-- 数据预处理部分:用户需要在Python端进行数据预处理,比如图像任务通常需要进行数据增强、裁剪等。
-Fluid提供了两种数据读取方式:**同步数据读取** 和 **异步数据读取**,详情请参考文档 `如何准备数据 `_ 。
+Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use_numpy_array_as_train_data` 和 :ref:`user_guides_use_py_reader` ,详情请参考文档 :ref:`user_guide_prepare_data` 。
-2.1.1 同步数据读取
->>>>>>>>>>>>>>>
+2.1 同步数据读取
+^^^^^^^^^^^^^^^^
同步数据读取是一种简单并且直观的数据准备方式,代码示例如下:
.. code-block:: python
+ image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32")
+ label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+ # 模型定义
+ # ……
+ prediction = fluid.layers.fc(input=image, size=10)
+ loss = fluid.layers.cross_entropy(input=prediction, label=label)
+ avg_loss = fluid.layers.mean(loss)
+ # ……
+ # 读取数据
+ # paddle.dataset.mnist.train()返回数据读取的Reader,每次可以从Reader中读取一条样本,batch_size为128
+ train_reader = paddle.batch(paddle.dataset.mnist.train(), 128)
+
# 读取数据
end = time.time()
for batch_id, batch in enumerate(train_reader):
data_time = time.time() - end
# 训练网络
- executor.run(feed=[...], fetch_list=[...])
+ executor.run(feed={...}, fetch_list=[...])
batch_time = time.time() - end
end = time.time()
-用户通过调用自己编写的reader函数,reader每次输出一个batch的数据,并将数据传递给执行器。因此数据准备和执行是顺序进行的,用户可通过加入Python计时函数 time.time() 来统计数据准备部分和执行部分所占用的时间。
-2.1.2 异步数据读取
->>>>>>>>>>>>>>>
+用户首先需要通过 :code:`fluid.data` 定义模型的输入,然后根据输入构建模型,最后从事先自定义的Reader函数中获取一个batch的数据,并将数据传递给执行器。
+
+采用同步数据读取方式时,用户可通过加入Python计时函数 :code:`time.time()` 来统计数据准备部分和执行部分所占用的时间。
+由于数据准备和执行是顺序进行的,所以程序的执行速度可能较慢。如果用户想进行模型调试的话,同步数据读取是一个不错的选择。
-Paddle里面使用py_reader接口来实现异步数据读取,代码示例如下:
+
+2.2 异步数据读取
+^^^^^^^^^^^^^^^^
+
+Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来实现异步数据读取,代码示例如下:
.. code-block:: python
- # 启动py_reader
- train_py_reader.start()
+ image = fluid.data(name="image", shape=[None, 1, 28, 28], dtype="float32")
+ label = fluid.data(name="label", shape=[None, 1], dtype="int64")
+ data_loader = fluid.io.DataLoader.from_generator(
+ feed_list=[image, label],
+ capacity=64,
+ iterable=False,
+ use_double_buffer=True)
+ # 模型定义
+ # ……
+ prediction = fluid.layers.fc(input=image, size=10)
+ loss = fluid.layers.cross_entropy(input=prediction, label=label)
+ avg_loss = fluid.layers.mean(loss)
+ # ……
+ # 读取数据
+ train_reader = paddle.batch(paddle.dataset.mnist.train(), 128)
+ # places需与执行设备一致,例如GPU训练时可设置为fluid.cuda_places()
+ data_loader.set_batch_generator(train_reader, places=places)
+
+ # 启动data_loader
+ data_loader.start()
batch_id = 0
try:
end = time.time()
while True:
- print("queue size: ", train_py_reader.queue.size())
+ print("queue size: ", data_loader.queue.size())
loss, = executor.run(fetch_list=[...])
# ...
batch_time = time.time() - end
end = time.time()
batch_id += 1
except fluid.core.EOFException:
- train_py_reader.reset()
+ data_loader.reset()
-使用异步数据读取时,Paddle的C++端会维护一个数据队列,Python端通过单独的线程向C++端的数据队列传入数据。用户可以在训练过程中输出数据队列中数据的个数,如果queue size始终不为空,表明Python端数据准备的速度比模型执行的速度快,这种情况下Python端的数据读取可能不是瓶颈。
+用户首先需要通过 :code:`fluid.io.DataLoader.from_generator` 定义DataLoader对象,并使用 :code:`set_batch_generator` 方法将自定义的Reader与DataLoader绑定。
+若DataLoader被定义成不可迭代的( :code:`iterable=False` ),在训练开始之前,通过调用 :code:`start()` 方法来启动数据读取。
+在数据读取结束之后, :code:`executor.run` 会抛出 :code:`fluid.core.EOFException` ,表示训练已经遍历完Reader中的所有数据。
-此外,Paddle提供的一些FLAGS也能很好的帮助分析性能,比如通过设置 :code:`export FLAGS_reader_queue_speed_test_mode=True` ,数据队列中的训练数据在被读取之后,不会从数据队列中弹出,这样能够保证数据队列始终不为空,这样就能够很好的评估出数据读取所占的开销。**注意,FLAGS_reader_queue_speed_test_mode只能在分析的时候打开,正常训练模型时需要关闭**。
+采用异步数据读取时,Python端和C++端共同维护一个数据队列,Python端启动一个线程,负责向队列中插入数据,C++端在训练/预测过程中,从数据队列中获取数据,并将该数据从队列中移除。
+用户可以在程序运行过程中,监测数据队列是否为空,如果队列始终不为空,表明数据准备的速度比模型执行的速度快,这种情况下数据读取可能不是瓶颈。
-2.2 优化数据准备速度的方法
-^^^^^^^^^^^^^^^^
+另外,Paddle提供的一些FLAGS也能很好地帮助分析性能。如果用户希望评估在完全没有数据读取开销情况下模型的性能,可以设置环境变量 :code:`FLAGS_reader_queue_speed_test_mode` ,在该变量为True的情况下,C++端从数据队列中获取数据之后,不会将数据从数据队列中移除,这样能够保证数据队列始终不为空,从而避免了C++端读取数据时的等待开销。
-- 为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer` 。此外,用户可根据模型的实际情况设置数据队列的大小。
-- 如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,这时候需要考虑对Python的用户reader进行加速。常用的方法为:**使用Python多进程准备数据**。一个简单的使用多进程准备数据的示例,请参考 `YOLOv3 `_ 。
-- Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。
+**需要特别注意的是,** :code:`FLAGS_reader_queue_speed_test_mode` **只能在性能分析的时候打开,正常训练模型时需要关闭。**
+
+为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer=True` 。用户可根据模型的实际情况设置数据队列的大小。
+如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,就需要考虑对数据读取Reader进行加速。
+常用的方法是 **使用Python多进程准备数据** ,一个简单的使用多进程准备数据的示例,可以参考 `YOLOv3 `_ 。
+
+Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。
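+
+例如,图片Tensor的归一化就可以用飞桨的API写到模型配置中、在GPU上完成(以下为一个简要示意,其中输入shape仅为示例,img_mean、img_std在实际使用时需要赋为真实的均值和方差):
+
+.. code-block:: python
+
+    image = fluid.data(name="image", shape=[None, 3, 224, 224], dtype="float32")
+    img_mean = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_mean", persistable=True)
+    img_std = fluid.layers.create_global_var([3, 1, 1], 0.0, "float32", name="img_std", persistable=True)
+    # 在GPU上完成减均值、除方差的归一化,减轻CPU预处理数据的负担
+    t1 = fluid.layers.elementwise_sub(image / 255.0, img_mean, axis=1)
+    image = fluid.layers.elementwise_div(t1, img_std, axis=1)
+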
3. 模型训练相关优化
=============
@@ -129,85 +182,152 @@ Paddle里面使用py_reader接口来实现异步数据读取,代码示例如
3.1 执行器介绍
^^^^^^^^^^^^^^^^
-目前Paddle中有两个执行器, :code:`Executor` 和 :code:`ParallelExecutor` ,这两个执行器的区别:
+目前Paddle的Python API中提供了 :code:`fluid.compiler.CompiledProgram` 的概念,用户可以通过 :code:`CompiledProgram` 对传入的program进行编译。
+如果希望采用数据并行模式训练,只需要对 :code:`CompiledProgram` 返回的对象调用 :code:`with_data_parallel` 方法即可,最后统一通过 :code:`executor.run(…)` 执行compiled_program。
-执行调度器
->>>>>>>>>>>>>>>
+虽然统一通过 :code:`executor.run(…)` 接口来执行,实际底层的执行策略有两种,对应C++部分的两个执行器,即 :code:`Executor` 和 :code:`ParallelExecutor` ,如果用户采用数据并行模式,C++部分使用的是 :code:`ParallelExecutor` ,除此之外都是使用 :code:`Executor` 。
+这两个执行器的差别:
-.. csv-table::
+.. csv-table::
:header: "执行器 ", "执行对象", "执行策略"
:widths: 3, 3, 5
":code:`Executor`", ":code:`Program`", "根据 :code:`Program` 中Operator定义的先后顺序依次运行。"
":code:`ParallelExecutor`", "SSA Graph", "根据Graph中各个节点之间的依赖关系,通过多线程运行。"
-为了更好的分析模型, :code:`ParallelExecutor` 内部首先会将输入的 :code:`Program` 转为SSA Graph,然后根据 :code:`build_strategy` 中的配置,通过一系列的Pass对Graph进行优化,比如:memory optimize,operator fuse等优化。最后根据 :code:`execution_strategy` 中的配置执行训练任务。
-此外, :code:`ParallelExecutor` 支持数据并行,即单进程多卡和多进程多卡,关于 :code:`ParallelExecutor` 的具体介绍请参考 `文档 `_ 。
+可以看出, :code:`Executor` 的内部逻辑非常简单,但性能可能会弱一些,因为 :code:`Executor` 对于program中的操作是串行执行的。
+而 :code:`ParallelExecutor` 首先会将program转变为计算图,并分析计算图中节点间的连接关系,对图中没有相互依赖的节点(OP),通过多线程并行执行。
-为了统一 :code:`ParallelExecutor` 接口和 :code:`Executor` 接口,Paddle提供了 :code:`fluid.compiler.CompiledProgram` 接口,在数据并行模式下,该接口底层调用的是 :code:`ParallelExecutor` 。
+因此, :code:`Executor` 是一个轻量级的执行器,目前主要用于参数初始化、模型保存、模型加载。
+:code:`ParallelExecutor` 是 :code:`Executor` 的升级版本,目前 :code:`ParallelExecutor` 主要用于模型训练,包括单机单卡、单机多卡以及多机多卡训练。
-3.2 BuildStrategy中参数配置说明
-^^^^^^^^^^^^^^^^
-BuildStrategy配置选项
->>>>>>>>>>>>>>>
+:code:`ParallelExecutor` 执行计算图之前,可以对计算图进行一些优化,比如使计算图中的一些操作是In-place的、将计算图中的参数更新操作进行融合等。
+用户还可以调整 :code:`ParallelExecutor` 执行过程中的一些配置,比如执行计算图的线程数等。这些配置分别通过构建策略(BuildStrategy)和执行策略(ExecutionStrategy)来设置。
+
+
+一个简单的使用示例如下:
+
+.. code-block:: python
+
+ build_strategy = fluid.BuildStrategy()
+ build_strategy.enable_inplace = True
+ build_strategy.fuse_all_optimizer_ops=True
+
+ exec_strategy = fluid.ExecutionStrategy()
+ exec_strategy.num_threads = 4
+
+ train_program = fluid.compiler.CompiledProgram(main_program).with_data_parallel(
+ loss_name=loss.name,
+ build_strategy=build_strategy,
+ exec_strategy=exec_strategy)
+
+ place = fluid.CUDAPlace(0)
+ exe = fluid.Executor(place)
+ # 使用DataLoader读取数据,因此执行时不需要设置feed
+ fetch_outs = exe.run(train_program, fetch_list=[loss.name])
-.. csv-table::
+
+
+3.2 构建策略(BuildStrategy)配置参数介绍
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BuildStrategy中提供了一些关于计算图优化的策略,这些策略可以在不同程度上提升模型的训练速度,但是其中一些策略与模型的结构有关,比如 :code:`fuse_all_optimizer_ops` 不支持sparse梯度,我们正在积极地完善这些策略,并计划在下一个版本中将这些策略默认打开。
+
+构建策略的详细介绍如下:
+
+.. csv-table::
:header: "选项", "类型", "默认值", "说明"
:widths: 3, 3, 3, 5
- ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", ":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练."
- ":code:`enable_backward_optimizer_op_deps`", "bool", "FALSE", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作."
- ":code:`fuse_all_optimizer_ops`", "bool", "FALSE", "对模型中的参数更新算法进行融合."
- ":code:`fuse_all_reduce_ops`", "bool", "FALSE", "多卡训练时,将all_reduce Op进行融合."
- ":code:`fuse_relu_depthwise_conv`", "bool", "FALSE", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个."
- ":code:`fuse_broadcast_ops`", "bool", "FALSE", "在 :code:`Reduce` 模式下,对最后的多个Broadcast操作融合为一个."
- ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的那些操作可以使用MKLDNN库,如果不进行设置,模型可以使用MKLDNN库的所有操作都会使用MKLDNN库."
+ ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", ":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练。"
+ ":code:`enable_backward_optimizer_op_deps`", "bool", "True", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作。"
+ ":code:`fuse_all_optimizer_ops`", "bool", "False", "对模型中的参数更新算法进行融合。"
+ ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将all_reduce操作进行融合。"
+ ":code:`fuse_relu_depthwise_conv`", "bool", "False", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个。"
+ ":code:`fuse_broadcast_ops`", "bool", "False", "在 :code:`Reduce` 模式下,将最后的多个Broadcast操作融合为一个。"
+ ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的哪些操作可以使用MKLDNN库。默认情况下,模型中用到的操作如果在Paddle目前支持的可以使用mkldnn库计算的列表中,这些操作都会调用mkldnn库的接口进行计算。"
+ ":code:`debug_graphviz_path`", "str", "{}", "将Graph以graphviz格式输出到debug_graphviz_path所指定的文件中。"
-说明:
- - 关于 :code:`reduce_strategy` ,在 :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 模式下,各个节点需要分别对这25个参数进行更新,最后将更新的参数Broadcast到其他节点上.
- - 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。
- - 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。**注意:目前不支持sparse参数梯度** 。
- - 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。**注意:目前不支持sparse参数梯度**。
- - 关于 :code:`mkldnn_enabled_op_types` ,支持mkldnn库的Op有:transpose, sum, softmax, requantize, quantize, pool2d, lrn, gaussian_random, fc, dequantize, conv2d_transpose, conv2d, conv3d, concat, batch_norm, relu, tanh, sqrt, abs.
+参数说明:
-3.3 ExecutionStrategy中的配置参数
-^^^^^^^^^^^^^^^^
-ExecutionStrategy配置选项
->>>>>>>>>>>>>>>
+(1) 关于 :code:`reduce_strategy` , :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀地分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` 并在该节点上完成参数更新,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 模式下,各个节点只需要分别对其中的25个参数进行更新,最后将更新后的参数Broadcast到其他节点上。注意:如果是使用CPU进行数据并行训练,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace。
+
+(2) 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。
+
+(3) 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。 **注意:目前不支持sparse参数梯度** 。
+
+(4) 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。 **注意:目前不支持sparse参数梯度** 。
+
+(5) 关于 :code:`mkldnn_enabled_op_types` ,目前Paddle的Op中可以使用mkldnn库计算的操作包括:transpose、sum、softmax、requantize、quantize、pool2d、lrn、gaussian_random、fc、dequantize、conv2d_transpose、conv2d、conv3d、concat、batch_norm、relu、tanh、sqrt、abs。
+
+
+3.3 执行策略(ExecutionStrategy)配置参数介绍
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ExecutionStrategy中提供了关于计算图执行时的一些配置,这些配置可能会影响模型的训练速度。同时,这些配置与模型的结构有关,如果用户希望模型训练速度更快,可以调整一下这些配置。后续我们会对这部分进行优化,根据输入的模型结构动态调整这些设置。
+
+ExecutionStrategy配置选项说明:
-.. csv-table::
+.. csv-table::
:header: "选项", "类型", "默认值", "说明"
:widths: 3, 3, 5, 5
- ":code:`num_iteration_per_drop_scope`", "INT", "1", "经过多少次迭代之后清理一次local execution scope"
+ ":code:`num_iteration_per_drop_scope`", "INT", "100", "经过多少次迭代之后清理一次local execution scope"
":code:`num_threads`", "INT", "对于CPU:2*dev_count;对于GPU:4*dev_count. (这是一个经验值)", ":code:`ParallelExecutor` 中执行所有Op使用的线程池大小"
说明:
- - 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,这些变量被放在local execution scope中。通常每经过一个batch就要清理一下local execution scope中的变量,但是由于GPU是异步设备,在清理local execution scope之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在 :code:`execution_strategy` 中添加了 :code:`num_iteration_per_drop_scope` 选项。用户可以指定经过多少次迭代之后清理一次local execution scope。
- - 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。**注意:线程池不是越大越好**。
-执行策略配置推荐
->>>>>>>>>>>>>>>
+(1) 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,若每经过一个batch就清理一次临时变量,由于GPU是异步设备,在清理之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在execution_strategy中添加了 :code:`num_iteration_per_drop_scope` 选项,用户可以指定经过多少次迭代之后清理一次。
-- 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置 :code:`exec_strategy.num_iteration_per_drop_scope=100` ,这样可以避免反复地申请和释放内存。该配置对于一些模型的优化效果较为明显。
-- 对于一些较小的模型,比如mnist、language_model等,多个线程乱序调度op的开销大于其收益,因此推荐设置 :code:`exec_strategy.num_threads=1` 。
+(2) 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。 **注意:线程池不是越大越好** 。
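+
+例如,可以按如下方式同时设置这两个选项(其中dev_count表示训练使用的GPU卡数,仅为示意):
+
+.. code-block:: python
+
+    exec_strategy = fluid.ExecutionStrategy()
+    # 每迭代100次清理一次临时变量,避免频繁的设备同步
+    exec_strategy.num_iteration_per_drop_scope = 100
+    # GPU任务的经验值:4 * 卡数
+    exec_strategy.num_threads = 4 * dev_count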
-CPU训练设置
->>>>>>>>>>>>>>>
-- 如果使用CPU做数据并行训练,需要指定环境变量CPU_NUM,这个环境变量指定程序运行过程中使用的 :code:`CPUPlace` 的个数。
-- 如果使用CPU进行数据并行训练,并且 :code:`build_strategy.reduce_strategy` = :code:`fluid.BuildStrategy.ReduceStrategy.Reduce` ,所有 :code:`CPUPlace` 上的参数是共享的,因此对于一些使用CPU进行数据并行训练的模型,选用 :code:`Reduce` 模式可能会更快一些。
+4. 运行时FLAGS设置优化
+=================
-4. 运行时FLAGS设置
-=============
-Fluid中有一些FLAGS可以有助于性能优化:
+Paddle中有一些FLAGS可以有助于性能优化:
-- FLAGS_fraction_of_gpu_memory_to_use表示每次分配GPU显存的最小单位,取值范围为[0, 1)。由于CUDA原生的显存分配cuMalloc和释放cuFree操作均是同步操作,非常耗时,因此将FLAGS_fraction_of_gpu_memory_to_use设置成一个较大的值,比如0.92(默认值),可以显著地加速训练的速度。
-- FLAGS_cudnn_exhaustive_search表示cuDNN在选取conv实现算法时采取穷举搜索策略,因此往往能选取到一个更快的conv实现算法,这对于CNN网络通常都是有加速的。但穷举搜索往往也会增加cuDNN的显存需求,因此用户可根据模型的实际情况选择是否设置该变量。
-- FLAGS_enable_cublas_tensor_op_math表示是否使用TensorCore加速计算cuBLAS。这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失。
+(1) :code:`FLAGS_cudnn_exhaustive_search` 表示在调用cuDNN中的卷积操作时,根据输入数据的shape等信息,采取穷举搜索的策略从算法库中选取到更快的卷积算法,进而实现对模型中卷积操作的加速。需要注意的是:
+ - 在搜索算法过程中需要使用较多的显存,如果用户的模型中卷积操作较多,或者GPU卡显存较小,可能会出现显存不足问题。
+ - 通过穷举搜索选择好算法之后,该算法会进入Cache,以便下次运行时,如果输入数据的shape等信息不变,直接使用Cache中的算法。
-5. 使用Profile工具进行性能分析
-=============
+(2) :code:`FLAGS_enable_cublas_tensor_op_math` 表示是否使用TensorCore加速cuBLAS等NV提供的库中的操作。需要注意的是,这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失,通常该损失不会影响模型的收敛性。
+
+
+5. 优秀实践
+=================
+
+(1) 尽可能地使用飞桨提供的单个layer实现所需操作。
+(2) 采用异步数据读取。
+(3) 模型训练相关优化:
+
+ - 使用ParallelExecutor作为底层执行器。单卡训练,也可以调用with_data_parallel方法。代码示例:
+
+ .. code-block:: python
+
+ compiled_prog = compiler.CompiledProgram(
+ fluid.default_main_program()).with_data_parallel(
+ loss_name=loss.name)
+
+ - 如果模型中参数的梯度都是非sparse的,可以打开fuse_all_optimizer_ops选项,将多个参数更新操作融合为一个。
+ - 如果是多卡训练,可以打开enable_backward_optimizer_op_deps、fuse_all_reduce_ops选项。如果想指定每次AllReduce操作的数据大小,可以设置 :code:`FLAGS_fuse_parameter_memory_size`,比如 :code:`export FLAGS_fuse_parameter_memory_size=1` ,表示每次AllReduce调用传输1MB的梯度。
+ - 使用CPU做数据并行训练时,推荐使用Reduce模式,因为在使用CPU进行数据并行训练时,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace上,这对提升速度也有很大帮助。
+ - 如果是Reduce模式,可打开fuse_broadcast_ops选项。
+ - 如果用户的模型较小,比如mnist、language_model等,可以将num_threads设为1。
+ - 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置为100,这样可以避免反复地申请和释放内存。
+
+目前我们正在推进这些配置自动化的工作:即根据输入的模型结构自动配置这些选项,争取在下一个版本中实现,敬请期待。
+
+(4) FLAGS设置
+
+.. code-block:: bash
+
+    export FLAGS_cudnn_exhaustive_search=True
+    export FLAGS_enable_cublas_tensor_op_math=True
+
+
+6. 使用Profile工具进行性能分析
+======================
-为方便用户更好的发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 `性能调优 `_ 。
+为方便用户更好地发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 :ref:`api_guide_analysis_tools` 。
diff --git a/doc/fluid/api/dataset.rst b/doc/fluid/api/dataset.rst
index a8b6bcab25b1a77b30678852c9d0d35153943b40..f86bb206329ab3599e780586a9a4d7cec7a93fa8 100644
--- a/doc/fluid/api/dataset.rst
+++ b/doc/fluid/api/dataset.rst
@@ -1,5 +1,5 @@
-=============
-fluid.dataset
-=============
+==============
+paddle.dataset
+==============
.. toctree::
diff --git a/doc/fluid/api/declarative.rst b/doc/fluid/api/declarative.rst
new file mode 100644
index 0000000000000000000000000000000000000000..93046be14be068b36748c3ad7d611ac5b443df4c
--- /dev/null
+++ b/doc/fluid/api/declarative.rst
@@ -0,0 +1,28 @@
+=======================
+paddle.declarative
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ declarative/batch_norm.rst
+ declarative/bilinear_tensor_product.rst
+ declarative/conv2d.rst
+ declarative/conv2d_transpose.rst
+ declarative/conv3d.rst
+ declarative/conv3d_transpose.rst
+ declarative/create_parameter.rst
+ declarative/crf_decoding.rst
+ declarative/data_norm.rst
+ declarative/deformable_conv.rst
+ declarative/embedding.rst
+ declarative/fc.rst
+ declarative/group_norm.rst
+ declarative/hsigmoid.rst
+ declarative/instance_norm.rst
+ declarative/layer_norm.rst
+ declarative/multi_box_head.rst
+ declarative/nce.rst
+ declarative/prelu.rst
+ declarative/row_conv.rst
+ declarative/spectral_norm.rst
diff --git a/doc/fluid/api/declarative/batch_norm.rst b/doc/fluid/api/declarative/batch_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23971945e133fd96ada68d9d197bd71fe947c2fd
--- /dev/null
+++ b/doc/fluid/api/declarative/batch_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_batch_norm:
+
+batch_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.batch_norm
+
+
diff --git a/doc/fluid/api/declarative/bilinear_tensor_product.rst b/doc/fluid/api/declarative/bilinear_tensor_product.rst
new file mode 100644
index 0000000000000000000000000000000000000000..122717ae08cfda5647a4f53a7b2a33bd0b715aea
--- /dev/null
+++ b/doc/fluid/api/declarative/bilinear_tensor_product.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_bilinear_tensor_product:
+
+bilinear_tensor_product
+-------------------------------
+:doc_source: paddle.fluid.layers.bilinear_tensor_product
+
+
diff --git a/doc/fluid/api/declarative/conv2d.rst b/doc/fluid/api/declarative/conv2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ba8cff6e43af600c77880540ae401c0184651c21
--- /dev/null
+++ b/doc/fluid/api/declarative/conv2d.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_conv2d:
+
+conv2d
+-------------------------------
+:doc_source: paddle.fluid.layers.conv2d
+
+
diff --git a/doc/fluid/api/declarative/conv2d_transpose.rst b/doc/fluid/api/declarative/conv2d_transpose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a24283c8b91efb43100bc954960c6d15e68c7f5
--- /dev/null
+++ b/doc/fluid/api/declarative/conv2d_transpose.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_conv2d_transpose:
+
+conv2d_transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.conv2d_transpose
+
+
diff --git a/doc/fluid/api/declarative/conv3d.rst b/doc/fluid/api/declarative/conv3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3e2bbaacc284f46e6f09b2512d2c863291c0d7c8
--- /dev/null
+++ b/doc/fluid/api/declarative/conv3d.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_conv3d:
+
+conv3d
+-------------------------------
+:doc_source: paddle.fluid.layers.conv3d
+
+
diff --git a/doc/fluid/api/declarative/conv3d_transpose.rst b/doc/fluid/api/declarative/conv3d_transpose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08a5b8104ab70f856ec67a71dbbe9475413a6663
--- /dev/null
+++ b/doc/fluid/api/declarative/conv3d_transpose.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_conv3d_transpose:
+
+conv3d_transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.conv3d_transpose
+
+
diff --git a/doc/fluid/api/declarative/create_parameter.rst b/doc/fluid/api/declarative/create_parameter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..25d15ecbc4fcced99430ce5a5887c1ff369b2f19
--- /dev/null
+++ b/doc/fluid/api/declarative/create_parameter.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_create_parameter:
+
+create_parameter
+-------------------------------
+:doc_source: paddle.fluid.layers.create_parameter
+
+
diff --git a/doc/fluid/api/declarative/crf_decoding.rst b/doc/fluid/api/declarative/crf_decoding.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc7fb3d705d302ef74974cbb6daa9198de5b9b63
--- /dev/null
+++ b/doc/fluid/api/declarative/crf_decoding.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_crf_decoding:
+
+crf_decoding
+-------------------------------
+:doc_source: paddle.fluid.layers.crf_decoding
+
+
diff --git a/doc/fluid/api/declarative/data_norm.rst b/doc/fluid/api/declarative/data_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..87d7aa57d5f144261349dc946ac37cace909cd89
--- /dev/null
+++ b/doc/fluid/api/declarative/data_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_data_norm:
+
+data_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.data_norm
+
+
diff --git a/doc/fluid/api/declarative/deformable_conv.rst b/doc/fluid/api/declarative/deformable_conv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a0d39dc4cad5fe0e103d3e3a43a81be97fc36f3b
--- /dev/null
+++ b/doc/fluid/api/declarative/deformable_conv.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_deformable_conv:
+
+deformable_conv
+-------------------------------
+:doc_source: paddle.fluid.layers.deformable_conv
+
+
diff --git a/doc/fluid/api/declarative/embedding.rst b/doc/fluid/api/declarative/embedding.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d37d2c77ab287184ba7f0a1bfa2370234b0813aa
--- /dev/null
+++ b/doc/fluid/api/declarative/embedding.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_embedding:
+
+embedding
+-------------------------------
+:doc_source: paddle.fluid.input.embedding
+
+
diff --git a/doc/fluid/api/declarative/fc.rst b/doc/fluid/api/declarative/fc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97ed17e4d4c291c9d770a205e5749d72f4ca0a57
--- /dev/null
+++ b/doc/fluid/api/declarative/fc.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_fc:
+
+fc
+-------------------------------
+:doc_source: paddle.fluid.layers.fc
+
+
diff --git a/doc/fluid/api/declarative/group_norm.rst b/doc/fluid/api/declarative/group_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bbc40ce66cfc4f632452d038f10492fb59ec3d3a
--- /dev/null
+++ b/doc/fluid/api/declarative/group_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_group_norm:
+
+group_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.group_norm
+
+
diff --git a/doc/fluid/api/declarative/hsigmoid.rst b/doc/fluid/api/declarative/hsigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c7d5264f242b40cdaaa629eb4d1b082ec1fe6840
--- /dev/null
+++ b/doc/fluid/api/declarative/hsigmoid.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_hsigmoid:
+
+hsigmoid
+-------------------------------
+:doc_source: paddle.fluid.layers.hsigmoid
+
+
diff --git a/doc/fluid/api/declarative/instance_norm.rst b/doc/fluid/api/declarative/instance_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..25adf44297f53110c62515f20bec49a60ec17654
--- /dev/null
+++ b/doc/fluid/api/declarative/instance_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_instance_norm:
+
+instance_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.instance_norm
+
+
diff --git a/doc/fluid/api/declarative/layer_norm.rst b/doc/fluid/api/declarative/layer_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d9826c7804691db7b884504294d65febc89ed765
--- /dev/null
+++ b/doc/fluid/api/declarative/layer_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_layer_norm:
+
+layer_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.layer_norm
+
+
diff --git a/doc/fluid/api/declarative/multi_box_head.rst b/doc/fluid/api/declarative/multi_box_head.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a80a8187a26e5923cde16b6e12c13f66c6744655
--- /dev/null
+++ b/doc/fluid/api/declarative/multi_box_head.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_multi_box_head:
+
+multi_box_head
+-------------------------------
+:doc_source: paddle.fluid.layers.multi_box_head
+
+
diff --git a/doc/fluid/api/declarative/nce.rst b/doc/fluid/api/declarative/nce.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9fe774511e603ba46f7a409fe750f7cdc7e4b4a5
--- /dev/null
+++ b/doc/fluid/api/declarative/nce.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_nce:
+
+nce
+-------------------------------
+:doc_source: paddle.fluid.layers.nce
+
+
diff --git a/doc/fluid/api/declarative/prelu.rst b/doc/fluid/api/declarative/prelu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc1bc138610fd5a15b0ca177c713a26c8ac449
--- /dev/null
+++ b/doc/fluid/api/declarative/prelu.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_prelu:
+
+prelu
+-------------------------------
+:doc_source: paddle.fluid.layers.prelu
+
+
diff --git a/doc/fluid/api/declarative/row_conv.rst b/doc/fluid/api/declarative/row_conv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..69942f0e0c3246904f546ae8fd649dab84dcaeb4
--- /dev/null
+++ b/doc/fluid/api/declarative/row_conv.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_row_conv:
+
+row_conv
+-------------------------------
+:doc_source: paddle.fluid.layers.row_conv
+
+
diff --git a/doc/fluid/api/declarative/spectral_norm.rst b/doc/fluid/api/declarative/spectral_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f797b8503a4f1f0ac3a2c569ecb0d8426638d66
--- /dev/null
+++ b/doc/fluid/api/declarative/spectral_norm.rst
@@ -0,0 +1,7 @@
+.. _api_declarative_spectral_norm:
+
+spectral_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.spectral_norm
+
+
diff --git a/doc/fluid/api/distributed.rst b/doc/fluid/api/distributed.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fddddbb11adca1374a6b496eb26e4d8468a3d13c
--- /dev/null
+++ b/doc/fluid/api/distributed.rst
@@ -0,0 +1,13 @@
+==================
+paddle.distributed
+==================
+
+.. toctree::
+ :maxdepth: 1
+
+ distributed/get_rank.rst
+ distributed/get_world_size.rst
+ distributed/init_parallel_env.rst
+ distributed/ParallelEnv.rst
+ distributed/prepare_context.rst
+ distributed/spawn.rst
diff --git a/doc/fluid/api/distributed/ParallelEnv.rst b/doc/fluid/api/distributed/ParallelEnv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..46b07c64f0358c4bf322e1325e2c6cb31fd2ea33
--- /dev/null
+++ b/doc/fluid/api/distributed/ParallelEnv.rst
@@ -0,0 +1,5 @@
+.. _api_distributed_ParallelEnv:
+
+ParallelEnv
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv
\ No newline at end of file
diff --git a/doc/fluid/api/distributed/get_rank.rst b/doc/fluid/api/distributed/get_rank.rst
new file mode 100644
index 0000000000000000000000000000000000000000..98a64831423e486877b47db8cbda7a54d1849f20
--- /dev/null
+++ b/doc/fluid/api/distributed/get_rank.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distributed_get_rank:
+
+get_rank
+--------
+
+.. autofunction:: paddle.distributed.get_rank
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/distributed/get_world_size.rst b/doc/fluid/api/distributed/get_world_size.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2de447e1bc276586e2220b043e9fffe48d21f6db
--- /dev/null
+++ b/doc/fluid/api/distributed/get_world_size.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distributed_get_world_size:
+
+get_world_size
+--------------
+
+.. autofunction:: paddle.distributed.get_world_size
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/distributed/init_parallel_env.rst b/doc/fluid/api/distributed/init_parallel_env.rst
new file mode 100644
index 0000000000000000000000000000000000000000..99473dd347676e0f88cce8454ccccf60c5e1da17
--- /dev/null
+++ b/doc/fluid/api/distributed/init_parallel_env.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distributed_init_parallel_env:
+
+init_parallel_env
+-----------------
+
+.. autofunction:: paddle.distributed.init_parallel_env
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/distributed/prepare_context.rst b/doc/fluid/api/distributed/prepare_context.rst
new file mode 100644
index 0000000000000000000000000000000000000000..abe6865f52ff82032a7e9873f492e5b531407ebf
--- /dev/null
+++ b/doc/fluid/api/distributed/prepare_context.rst
@@ -0,0 +1,5 @@
+.. _api_distributed_prepare_context:
+
+prepare_context
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.prepare_context
diff --git a/doc/fluid/api/distributed/spawn.rst b/doc/fluid/api/distributed/spawn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9c7895932fce5a5c7e55850eb37caabe680ef724
--- /dev/null
+++ b/doc/fluid/api/distributed/spawn.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distributed_spawn:
+
+spawn
+-----
+
+.. autofunction:: paddle.distributed.spawn
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/dygraph.rst b/doc/fluid/api/dygraph.rst
index 8f78d0e9e4334b53e39e52a308af8bb8d1892ddb..1df3cded70c9a2ca385a4a6138f3b340da2739aa 100644
--- a/doc/fluid/api/dygraph.rst
+++ b/doc/fluid/api/dygraph.rst
@@ -5,7 +5,6 @@ fluid.dygraph
.. toctree::
:maxdepth: 1
- dygraph/BackwardStrategy.rst
dygraph/BatchNorm.rst
dygraph/BilinearTensorProduct.rst
dygraph/Conv2D.rst
@@ -13,13 +12,25 @@ fluid.dygraph
dygraph/Conv3D.rst
dygraph/Conv3DTranspose.rst
dygraph/CosineDecay.rst
+ dygraph/DataParallel.rst
+ dygraph/declarative.rst
+ dygraph/disable_dygraph.rst
+ dygraph/Dropout.rst
+ dygraph/dygraph_to_static_code.rst
+ dygraph/dygraph_to_static_func.rst
dygraph/dygraph_to_static_output.rst
+ dygraph/dygraph_to_static_program.rst
dygraph/Embedding.rst
+ dygraph/enable_dygraph.rst
+ dygraph/enabled.rst
dygraph/ExponentialDecay.rst
+ dygraph/grad.rst
dygraph/GroupNorm.rst
dygraph/GRUUnit.rst
dygraph/guard.rst
+ dygraph/InstanceNorm.rst
dygraph/InverseTimeDecay.rst
+ dygraph/jit.rst
dygraph/Layer.rst
dygraph/LayerList.rst
dygraph/LayerNorm.rst
@@ -29,16 +40,20 @@ fluid.dygraph
dygraph/NCE.rst
dygraph/no_grad.rst
dygraph/NoamDecay.rst
+ dygraph/ParallelEnv.rst
dygraph/ParameterList.rst
dygraph/PiecewiseDecay.rst
dygraph/PolynomialDecay.rst
dygraph/Pool2D.rst
dygraph/PRelu.rst
dygraph/prepare_context.rst
+ dygraph/ProgramTranslator.rst
+ dygraph/ReduceLROnPlateau.rst
dygraph/save_dygraph.rst
dygraph/Sequential.rst
dygraph/SpectralNorm.rst
dygraph/to_variable.rst
dygraph/TracedLayer.rst
dygraph/Tracer.rst
+ dygraph/TranslatedLayer.rst
dygraph/TreeConv.rst
diff --git a/doc/fluid/api/dygraph/BackwardStrategy.rst b/doc/fluid/api/dygraph/BackwardStrategy.rst
deleted file mode 100644
index c818ed2051d52cbe55d63c0456614220e9401058..0000000000000000000000000000000000000000
--- a/doc/fluid/api/dygraph/BackwardStrategy.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_dygraph_BackwardStrategy:
-
-BackwardStrategy
-----------------
-
-.. autoclass:: paddle.fluid.dygraph.BackwardStrategy
- :members:
- :noindex:
-
diff --git a/doc/fluid/api/dygraph/DataParallel.rst b/doc/fluid/api/dygraph/DataParallel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33c8a3fd80181e529c1c6728f867cc952b818eb4
--- /dev/null
+++ b/doc/fluid/api/dygraph/DataParallel.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_DataParallel:
+
+DataParallel
+------------
+
+.. autoclass:: paddle.fluid.dygraph.DataParallel
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/Dropout.rst b/doc/fluid/api/dygraph/Dropout.rst
new file mode 100644
index 0000000000000000000000000000000000000000..697101e001c6a9b0b03ffe430aa80435fee5529d
--- /dev/null
+++ b/doc/fluid/api/dygraph/Dropout.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_Dropout:
+
+Dropout
+-------
+
+.. autoclass:: paddle.fluid.dygraph.Dropout
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/GRUCell.rst b/doc/fluid/api/dygraph/GRUCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..30d104af9d4084233fb2a12df2ad2042b00f8e89
--- /dev/null
+++ b/doc/fluid/api/dygraph/GRUCell.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_GRUCell:
+
+GRUCell
+-------
+
+.. autoclass:: paddle.fluid.dygraph.GRUCell
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/InstanceNorm.rst b/doc/fluid/api/dygraph/InstanceNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2e09ea9754bbceb1b1af9111dcfcfb404e0c70f4
--- /dev/null
+++ b/doc/fluid/api/dygraph/InstanceNorm.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_InstanceNorm:
+
+InstanceNorm
+------------
+
+.. autoclass:: paddle.fluid.dygraph.InstanceNorm
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/LSTMCell.rst b/doc/fluid/api/dygraph/LSTMCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..243ab36f9ec85701aa501ab25a9ee4cdc5e3ec5d
--- /dev/null
+++ b/doc/fluid/api/dygraph/LSTMCell.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_LSTMCell:
+
+LSTMCell
+--------
+
+.. autoclass:: paddle.fluid.dygraph.LSTMCell
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/ParallelEnv.rst b/doc/fluid/api/dygraph/ParallelEnv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..25514b2bf3de97aec0b4e1bca109e5f52534dfe3
--- /dev/null
+++ b/doc/fluid/api/dygraph/ParallelEnv.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_ParallelEnv:
+
+ParallelEnv
+-----------
+
+.. autoclass:: paddle.fluid.dygraph.ParallelEnv
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/ReduceLROnPlateau.rst b/doc/fluid/api/dygraph/ReduceLROnPlateau.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d03ce41e1d45d51c2fe611c3f5607a399ca6cf3b
--- /dev/null
+++ b/doc/fluid/api/dygraph/ReduceLROnPlateau.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_ReduceLROnPlateau:
+
+ReduceLROnPlateau
+-----------------
+
+.. autoclass:: paddle.fluid.dygraph.ReduceLROnPlateau
+ :members:
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/TranslatedLayer.rst b/doc/fluid/api/dygraph/TranslatedLayer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a6f7fd9411e5179999a8bda3f1ae197092343a7a
--- /dev/null
+++ b/doc/fluid/api/dygraph/TranslatedLayer.rst
@@ -0,0 +1,8 @@
+.. _api_fluid_dygraph_TranslatedLayer:
+
+TranslatedLayer
+-----------------------
+
+.. autoclass:: paddle.fluid.dygraph.TranslatedLayer
+ :members:
+ :noindex:
diff --git a/doc/fluid/api/dygraph/disable_dygraph.rst b/doc/fluid/api/dygraph/disable_dygraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..17adf7a7559fe31260a4d37b618bafa5e0575b57
--- /dev/null
+++ b/doc/fluid/api/dygraph/disable_dygraph.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_disable_dygraph:
+
+disable_dygraph
+---------------
+
+.. autofunction:: paddle.fluid.dygraph.disable_dygraph
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/dygraph_to_static_code.rst b/doc/fluid/api/dygraph/dygraph_to_static_code.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bd6af528d903316df1c1c03f63392ace8af7c55b
--- /dev/null
+++ b/doc/fluid/api/dygraph/dygraph_to_static_code.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_dygraph_to_static_code:
+
+dygraph_to_static_code
+----------------------
+
+.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_code
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/dygraph_to_static_func.rst b/doc/fluid/api/dygraph/dygraph_to_static_func.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d73ac96d88263e5c759b260ddd7dd45f57b9fe71
--- /dev/null
+++ b/doc/fluid/api/dygraph/dygraph_to_static_func.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_dygraph_to_static_func:
+
+dygraph_to_static_func
+----------------------
+
+.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_func
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/dygraph_to_static_program.rst b/doc/fluid/api/dygraph/dygraph_to_static_program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f481533654b6db79b70a3a9b2235ec8a9696ec5
--- /dev/null
+++ b/doc/fluid/api/dygraph/dygraph_to_static_program.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_dygraph_to_static_program:
+
+dygraph_to_static_program
+-------------------------
+
+.. autofunction:: paddle.fluid.dygraph.dygraph_to_static_program
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/enable_dygraph.rst b/doc/fluid/api/dygraph/enable_dygraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..02dfdcd457761c8533118bcd7b505f427ab5d849
--- /dev/null
+++ b/doc/fluid/api/dygraph/enable_dygraph.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_enable_dygraph:
+
+enable_dygraph
+--------------
+
+.. autofunction:: paddle.fluid.dygraph.enable_dygraph
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/enabled.rst b/doc/fluid/api/dygraph/enabled.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc2bfa7649ef185be254a6a71161b613726e7449
--- /dev/null
+++ b/doc/fluid/api/dygraph/enabled.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_dygraph_enabled:
+
+enabled
+-------
+
+.. autofunction:: paddle.fluid.dygraph.enabled
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/grad.rst b/doc/fluid/api/dygraph/grad.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01973660288d4148827cd8a7cc19584950b1ed9a
--- /dev/null
+++ b/doc/fluid/api/dygraph/grad.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_paddle_grad:
+
+grad
+----
+
+.. autofunction:: paddle.grad
+ :noindex:
+
diff --git a/doc/fluid/api/dygraph/jit.rst b/doc/fluid/api/dygraph/jit.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7853a048535c045bae18f71c8b4d7f1e44cc65eb
--- /dev/null
+++ b/doc/fluid/api/dygraph/jit.rst
@@ -0,0 +1,10 @@
+===
+jit
+===
+
+.. toctree::
+ :maxdepth: 1
+
+ jit/save.rst
+ jit/load.rst
+ jit/SaveLoadConfig.rst
diff --git a/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst b/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e8d1d3bfbc35eca0c05594b540a0cd15c19cebe1
--- /dev/null
+++ b/doc/fluid/api/dygraph/jit/SaveLoadConfig.rst
@@ -0,0 +1,8 @@
+.. _api_fluid_dygraph_jit_SaveLoadConfig:
+
+SaveLoadConfig
+-------------------------------
+
+.. autoclass:: paddle.fluid.dygraph.jit.SaveLoadConfig
+ :members:
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/dygraph/jit/load.rst b/doc/fluid/api/dygraph/jit/load.rst
new file mode 100644
index 0000000000000000000000000000000000000000..51f59909873dd46bb43e42bdc2258a990580c24c
--- /dev/null
+++ b/doc/fluid/api/dygraph/jit/load.rst
@@ -0,0 +1,7 @@
+.. _api_fluid_dygraph_jit_load:
+
+load
+------------
+
+.. autofunction:: paddle.fluid.dygraph.jit.load
+ :noindex:
diff --git a/doc/fluid/api/dygraph/jit/save.rst b/doc/fluid/api/dygraph/jit/save.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb55029c2870b8c56edd93c4907ae0894036eabe
--- /dev/null
+++ b/doc/fluid/api/dygraph/jit/save.rst
@@ -0,0 +1,7 @@
+.. _api_fluid_dygraph_jit_save:
+
+save
+------------
+
+.. autofunction:: paddle.fluid.dygraph.jit.save
+ :noindex:
diff --git a/doc/fluid/api/fluid.rst b/doc/fluid/api/fluid.rst
index 1c07ca0a011d02488f833d6474e19985172508c8..1440d541e1d7abe78aa11609c33ceda0ce8a4f93 100644
--- a/doc/fluid/api/fluid.rst
+++ b/doc/fluid/api/fluid.rst
@@ -7,6 +7,7 @@ fluid
fluid/BuildStrategy.rst
fluid/CompiledProgram.rst
+ fluid/ComplexVariable.rst
fluid/cpu_places.rst
fluid/CPUPlace.rst
fluid/create_lod_tensor.rst
@@ -20,11 +21,15 @@ fluid
fluid/DataFeeder.rst
fluid/default_main_program.rst
fluid/default_startup_program.rst
+ fluid/device_guard.rst
+ fluid/disable_dygraph.rst
fluid/DistributeTranspiler.rst
fluid/DistributeTranspilerConfig.rst
fluid/embedding.rst
+ fluid/enable_dygraph.rst
fluid/ExecutionStrategy.rst
fluid/Executor.rst
+ fluid/get_flags.rst
fluid/global_scope.rst
fluid/gradients.rst
fluid/in_dygraph_mode.rst
@@ -44,6 +49,7 @@ fluid
fluid/require_version.rst
fluid/save.rst
fluid/scope_guard.rst
+ fluid/set_flags.rst
fluid/Tensor.rst
fluid/Variable.rst
fluid/WeightNormParamAttr.rst
diff --git a/doc/fluid/api/fluid/device_guard.rst b/doc/fluid/api/fluid/device_guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8d611168644c45322972669bdd2806f393bcf43
--- /dev/null
+++ b/doc/fluid/api/fluid/device_guard.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_device_guard:
+
+device_guard
+------------
+
+.. autofunction:: paddle.fluid.device_guard
+ :noindex:
+
diff --git a/doc/fluid/api/fluid/disable_dygraph.rst b/doc/fluid/api/fluid/disable_dygraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..481ab4a5a8de51006f976b012660eca123b2a39b
--- /dev/null
+++ b/doc/fluid/api/fluid/disable_dygraph.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_disable_dygraph:
+
+disable_dygraph
+---------------
+
+.. autofunction:: paddle.fluid.disable_dygraph
+ :noindex:
+
diff --git a/doc/fluid/api/fluid/enable_dygraph.rst b/doc/fluid/api/fluid/enable_dygraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..389919cfc96cbaaf2ac2dbb83267f3af93a19e87
--- /dev/null
+++ b/doc/fluid/api/fluid/enable_dygraph.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_enable_dygraph:
+
+enable_dygraph
+--------------
+
+.. autofunction:: paddle.fluid.enable_dygraph
+ :noindex:
+
diff --git a/doc/fluid/api/fluid/get_flags.rst b/doc/fluid/api/fluid/get_flags.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2432965408118fe7c58d2898c2871391a32750a2
--- /dev/null
+++ b/doc/fluid/api/fluid/get_flags.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_get_flags:
+
+get_flags
+---------
+
+.. autofunction:: paddle.fluid.get_flags
+ :noindex:
+
diff --git a/doc/fluid/api/fluid/set_flags.rst b/doc/fluid/api/fluid/set_flags.rst
new file mode 100644
index 0000000000000000000000000000000000000000..730438b200ee575912c940d616f0dbffdcf73d41
--- /dev/null
+++ b/doc/fluid/api/fluid/set_flags.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_set_flags:
+
+set_flags
+---------
+
+.. autofunction:: paddle.fluid.set_flags
+ :noindex:
+
diff --git a/doc/fluid/api/framework.rst b/doc/fluid/api/framework.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f95b77c5cb7b1907aaaa58b9e620e9447c9b66f5
--- /dev/null
+++ b/doc/fluid/api/framework.rst
@@ -0,0 +1,31 @@
+=======================
+paddle.framework
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ framework/append_backward.rst
+ framework/BuildStrategy.rst
+ framework/CompiledProgram.rst
+ framework/CPUPlace.rst
+ framework/create_global_var.rst
+ framework/create_parameter.rst
+ framework/CUDAPinnedPlace.rst
+ framework/CUDAPlace.rst
+ framework/default_main_program.rst
+ framework/default_startup_program.rst
+ framework/ExecutionStrategy.rst
+ framework/Executor.rst
+ framework/global_scope.rst
+ framework/gradients.rst
+ framework/name_scope.rst
+ framework/ParallelExecutor.rst
+ framework/ParamAttr.rst
+ framework/Print.rst
+ framework/Program.rst
+ framework/program_guard.rst
+ framework/py_func.rst
+ framework/scope_guard.rst
+ framework/Variable.rst
+ framework/WeightNormParamAttr.rst
diff --git a/doc/fluid/api/framework/BuildStrategy.rst b/doc/fluid/api/framework/BuildStrategy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..94384f0b54e63183e9214419141e87112b0db443
--- /dev/null
+++ b/doc/fluid/api/framework/BuildStrategy.rst
@@ -0,0 +1,7 @@
+.. _api_framework_BuildStrategy:
+
+BuildStrategy
+-------------------------------
+:doc_source: paddle.fluid.compiler.BuildStrategy
+
+
diff --git a/doc/fluid/api/framework/CPUPlace.rst b/doc/fluid/api/framework/CPUPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..322c101361512ef6ced7a2e1430cc9570ecdfd73
--- /dev/null
+++ b/doc/fluid/api/framework/CPUPlace.rst
@@ -0,0 +1,7 @@
+.. _api_framework_CPUPlace:
+
+CPUPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CPUPlace
+
+
diff --git a/doc/fluid/api/framework/CUDAPinnedPlace.rst b/doc/fluid/api/framework/CUDAPinnedPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..912861885f41674cda40dd9c3d8438e10141181b
--- /dev/null
+++ b/doc/fluid/api/framework/CUDAPinnedPlace.rst
@@ -0,0 +1,7 @@
+.. _api_framework_CUDAPinnedPlace:
+
+CUDAPinnedPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPinnedPlace
+
+
diff --git a/doc/fluid/api/framework/CUDAPlace.rst b/doc/fluid/api/framework/CUDAPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ea2b3bd80337b431632ddad875b703a81ac96461
--- /dev/null
+++ b/doc/fluid/api/framework/CUDAPlace.rst
@@ -0,0 +1,7 @@
+.. _api_framework_CUDAPlace:
+
+CUDAPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPlace
+
+
diff --git a/doc/fluid/api/framework/CompiledProgram.rst b/doc/fluid/api/framework/CompiledProgram.rst
new file mode 100644
index 0000000000000000000000000000000000000000..62240afc324523a7d1e106d90e22a2ea68d6bee7
--- /dev/null
+++ b/doc/fluid/api/framework/CompiledProgram.rst
@@ -0,0 +1,7 @@
+.. _api_framework_CompiledProgram:
+
+CompiledProgram
+-------------------------------
+:doc_source: paddle.fluid.compiler.CompiledProgram
+
+
diff --git a/doc/fluid/api/framework/ExecutionStrategy.rst b/doc/fluid/api/framework/ExecutionStrategy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bfc3a5c88c4d89b8b2c75bc9708796b7953becef
--- /dev/null
+++ b/doc/fluid/api/framework/ExecutionStrategy.rst
@@ -0,0 +1,7 @@
+.. _api_framework_ExecutionStrategy:
+
+ExecutionStrategy
+-------------------------------
+:doc_source: paddle.fluid.compiler.ExecutionStrategy
+
+
diff --git a/doc/fluid/api/framework/Executor.rst b/doc/fluid/api/framework/Executor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ca83758b42f769291671e2acc46f4db815f5bd1
--- /dev/null
+++ b/doc/fluid/api/framework/Executor.rst
@@ -0,0 +1,7 @@
+.. _api_framework_Executor:
+
+Executor
+-------------------------------
+:doc_source: paddle.fluid.executor.Executor
+
+
diff --git a/doc/fluid/api/framework/ParallelExecutor.rst b/doc/fluid/api/framework/ParallelExecutor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f08e60ec512077ba378cb3193344ee5d0ed8a3a0
--- /dev/null
+++ b/doc/fluid/api/framework/ParallelExecutor.rst
@@ -0,0 +1,7 @@
+.. _api_framework_ParallelExecutor:
+
+ParallelExecutor
+-------------------------------
+:doc_source: paddle.fluid.parallel_executor.ParallelExecutor
+
+
diff --git a/doc/fluid/api/framework/ParamAttr.rst b/doc/fluid/api/framework/ParamAttr.rst
new file mode 100644
index 0000000000000000000000000000000000000000..505d47e2c53d861d08056401e88c9caf06a33996
--- /dev/null
+++ b/doc/fluid/api/framework/ParamAttr.rst
@@ -0,0 +1,7 @@
+.. _api_framework_ParamAttr:
+
+ParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.ParamAttr
+
+
diff --git a/doc/fluid/api/framework/Print.rst b/doc/fluid/api/framework/Print.rst
new file mode 100644
index 0000000000000000000000000000000000000000..159499711821704c12f638f1b59efe0b3e5c92f1
--- /dev/null
+++ b/doc/fluid/api/framework/Print.rst
@@ -0,0 +1,7 @@
+.. _api_framework_Print:
+
+Print
+-------------------------------
+:doc_source: paddle.fluid.layers.control_flow.Print
+
+
diff --git a/doc/fluid/api/framework/Program.rst b/doc/fluid/api/framework/Program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f118dc60164aee8ccc56e0b9aa0954883c39a203
--- /dev/null
+++ b/doc/fluid/api/framework/Program.rst
@@ -0,0 +1,7 @@
+.. _api_framework_Program:
+
+Program
+-------------------------------
+:doc_source: paddle.fluid.framework.Program
+
+
diff --git a/doc/fluid/api/framework/Variable.rst b/doc/fluid/api/framework/Variable.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f25d40720945653915f12a67f81d918696e178c1
--- /dev/null
+++ b/doc/fluid/api/framework/Variable.rst
@@ -0,0 +1,7 @@
+.. _api_framework_Variable:
+
+Variable
+-------------------------------
+:doc_source: paddle.fluid.framework.Variable
+
+
diff --git a/doc/fluid/api/framework/WeightNormParamAttr.rst b/doc/fluid/api/framework/WeightNormParamAttr.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc669a67885fe742876c8e4a00e1fd6849e97ec9
--- /dev/null
+++ b/doc/fluid/api/framework/WeightNormParamAttr.rst
@@ -0,0 +1,7 @@
+.. _api_framework_WeightNormParamAttr:
+
+WeightNormParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.WeightNormParamAttr
+
+
diff --git a/doc/fluid/api/framework/append_backward.rst b/doc/fluid/api/framework/append_backward.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a8b85f6364c10bccaccc4b5f3a0ee255bb279849
--- /dev/null
+++ b/doc/fluid/api/framework/append_backward.rst
@@ -0,0 +1,7 @@
+.. _api_framework_append_backward:
+
+append_backward
+-------------------------------
+:doc_source: paddle.fluid.backward.append_backward
+
+
diff --git a/doc/fluid/api/framework/create_global_var.rst b/doc/fluid/api/framework/create_global_var.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7606f1657574568651a2d02d4b82740da35e8155
--- /dev/null
+++ b/doc/fluid/api/framework/create_global_var.rst
@@ -0,0 +1,7 @@
+.. _api_framework_create_global_var:
+
+create_global_var
+-------------------------------
+:doc_source: paddle.fluid.layers.tensor.create_global_var
+
+
diff --git a/doc/fluid/api/framework/create_parameter.rst b/doc/fluid/api/framework/create_parameter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97c4c1e58d0d926617e3a39fd743ffc6530f4066
--- /dev/null
+++ b/doc/fluid/api/framework/create_parameter.rst
@@ -0,0 +1,7 @@
+.. _api_framework_create_parameter:
+
+create_parameter
+-------------------------------
+:doc_source: paddle.fluid.layers.create_parameter
+
+
diff --git a/doc/fluid/api/framework/default_main_program.rst b/doc/fluid/api/framework/default_main_program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..774845e799986d60abda4d3aa7a84346b586a9c1
--- /dev/null
+++ b/doc/fluid/api/framework/default_main_program.rst
@@ -0,0 +1,7 @@
+.. _api_framework_default_main_program:
+
+default_main_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_main_program
+
+
diff --git a/doc/fluid/api/framework/default_startup_program.rst b/doc/fluid/api/framework/default_startup_program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..72872cc3ac2a325f8c459a8a8c9d8f681e7aaff7
--- /dev/null
+++ b/doc/fluid/api/framework/default_startup_program.rst
@@ -0,0 +1,7 @@
+.. _api_framework_default_startup_program:
+
+default_startup_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_startup_program
+
+
diff --git a/doc/fluid/api/framework/global_scope.rst b/doc/fluid/api/framework/global_scope.rst
new file mode 100644
index 0000000000000000000000000000000000000000..452256cf58458495e204e55cb585858eb85f741d
--- /dev/null
+++ b/doc/fluid/api/framework/global_scope.rst
@@ -0,0 +1,7 @@
+.. _api_framework_global_scope:
+
+global_scope
+-------------------------------
+:doc_source: paddle.fluid.executor.global_scope
+
+
diff --git a/doc/fluid/api/framework/gradients.rst b/doc/fluid/api/framework/gradients.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c082237025e5597e910e5786d1b06ac88cfcfeb
--- /dev/null
+++ b/doc/fluid/api/framework/gradients.rst
@@ -0,0 +1,7 @@
+.. _api_framework_gradients:
+
+gradients
+-------------------------------
+:doc_source: paddle.fluid.backward.gradients
+
+
diff --git a/doc/fluid/api/framework/name_scope.rst b/doc/fluid/api/framework/name_scope.rst
new file mode 100644
index 0000000000000000000000000000000000000000..616c4cd677bca531fefa3c4719839cc52341a608
--- /dev/null
+++ b/doc/fluid/api/framework/name_scope.rst
@@ -0,0 +1,7 @@
+.. _api_framework_name_scope:
+
+name_scope
+-------------------------------
+:doc_source: paddle.fluid.framework.name_scope
+
+
diff --git a/doc/fluid/api/framework/program_guard.rst b/doc/fluid/api/framework/program_guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d68a50af22b0225a3c3059f02c9c9e3c8a2b57b9
--- /dev/null
+++ b/doc/fluid/api/framework/program_guard.rst
@@ -0,0 +1,7 @@
+.. _api_framework_program_guard:
+
+program_guard
+-------------------------------
+:doc_source: paddle.fluid.framework.program_guard
+
+
diff --git a/doc/fluid/api/framework/py_func.rst b/doc/fluid/api/framework/py_func.rst
new file mode 100644
index 0000000000000000000000000000000000000000..57a4c4194a4523fecdfc0c7d0f4e00880e300bb3
--- /dev/null
+++ b/doc/fluid/api/framework/py_func.rst
@@ -0,0 +1,7 @@
+.. _api_framework_py_func:
+
+py_func
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.py_func
+
+
diff --git a/doc/fluid/api/framework/scope_guard.rst b/doc/fluid/api/framework/scope_guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..661564de6eef232a19842a903c5ca76ba1abceea
--- /dev/null
+++ b/doc/fluid/api/framework/scope_guard.rst
@@ -0,0 +1,7 @@
+.. _api_framework_scope_guard:
+
+scope_guard
+-------------------------------
+:doc_source: paddle.fluid.executor.scope_guard
+
+
diff --git a/doc/fluid/api/gen_doc.py b/doc/fluid/api/gen_doc.py
index a45f854ef38715f66d019a58edb4e17dcb1a74c5..1f4f3ad49332e585cc99c23228928dd59138b50a 100644
--- a/doc/fluid/api/gen_doc.py
+++ b/doc/fluid/api/gen_doc.py
@@ -19,18 +19,41 @@ import types
import os
import contextlib
import paddle.fluid as fluid
+import paddle.tensor as tensor
+import paddle.nn as nn
+import paddle.optimizer as optimizer
+
+#import paddle.complex as complex
+#import paddle.framework as framework
+
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--submodules', nargs="*")
parser.add_argument(
- '--module_name', type=str, help='Generate the documentation of which module')
+ '--module_name',
+ type=str,
+ help='Generate the documentation of which module')
parser.add_argument(
'--module_prefix', type=str, help='Generate the prefix of module')
parser.add_argument(
- '--output', type=str, help='Output file or output directory for output rst')
+ '--output',
+ type=str,
+ help='Output file or output directory for output rst')
+ parser.add_argument(
+ '--output_name',
+ type=str,
+ help='Name of the top-level package to generate docs for, e.g. fluid, tensor or nn')
parser.add_argument(
- '--to_multiple_files', type=bool, default=False, help='Whether to separate to multiple files')
+ '--output_dir',
+ type=str,
+ help='Directory prefix for the generated rst files')
+ parser.add_argument(
+ '--to_multiple_files',
+ type=bool,
+ default=False,
+ help='Whether to separate to multiple files')
+
return parser.parse_args()
def print_item(self, name):
@@ -44,8 +67,9 @@ def parse_arg():
else:
pass
+
class DocGenerator(object):
- def __init__(self, module_name=None, module_prefix=None):
+ def __init__(self, module_name=None, module_prefix=None):
self.module_name = module_name
self.module_prefix = module_prefix
self.stream = None
@@ -53,7 +77,7 @@ class DocGenerator(object):
@contextlib.contextmanager
def guard(self, filename):
assert self.stream is None, "stream must be None"
- self.stream = open(filename, 'w')
+ self.stream = open(filename, 'w')
yield
self.stream.close()
self.stream = None
@@ -61,20 +85,21 @@ class DocGenerator(object):
def print_submodule(self, submodule_name):
submodule = getattr(self.module, submodule_name)
if submodule is None:
- raise ValueError("Cannot find submodule {0}".format(submodule_name))
+ raise ValueError(
+ "Cannot find submodule {0}".format(submodule_name))
self.print_section(submodule_name)
- for item in sorted(submodule.__all__,key=str.lower):
+ for item in sorted(submodule.__all__, key=str.lower):
self.print_item(item)
def print_current_module(self):
- for item in sorted(self.module.__all__,key=str.lower):
+ for item in sorted(self.module.__all__, key=str.lower):
self.print_item(item)
def print_section(self, name):
self._print_header_(name, dot='=', is_title=False)
- def print_item(self, name):
+ def print_item(self, name, output_name):
item = getattr(self.module, name, None)
if isinstance(item, types.TypeType):
self.print_class(name)
@@ -82,7 +107,7 @@ class DocGenerator(object):
self.print_method(name)
else:
self.stream.close()
- path = os.getcwd()+"/fluid/"+name+".rst"
+ path = os.getcwd() + "/" + output_name + "/" + name + ".rst"
if name != "PipeReader":
os.remove(path)
@@ -140,7 +165,9 @@ class DocGenerator(object):
self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join(
self.module_prefix.split(".")), name))
-def generate_doc(module_name, module_prefix, output, to_multiple_files):
+
+def generate_doc(module_name, module_prefix, output, output_name,
+ to_multiple_files, output_dir):
if module_name == "":
module_name = None
@@ -150,25 +177,31 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files):
gen = DocGenerator()
if module_name is None:
- gen.module = fluid
- gen.module_name = 'fluid'
+ gen.module = eval(output_name)
+ gen.module_name = str(output_name)
else:
- gen.module = fluid
+ gen.module = eval(output_name)
for each_module_name in module_name.split('.'):
if not hasattr(gen.module, each_module_name):
raise ValueError("Cannot find fluid.{0}".format(module_name))
else:
gen.module = getattr(gen.module, each_module_name)
- gen.module_name = "fluid." + module_name
+ gen.module_name = output_name + "." + module_name
if module_prefix is None:
gen.module_prefix = gen.module_name
else:
- gen.module_prefix = "fluid." + module_prefix
+ gen.module_prefix = output_name + "." + module_prefix
+
+ dirname = output if to_multiple_files else os.path.dirname(output)
+
+ if output_dir is not None:
+ dirname = output_dir + "/" + dirname
+ output = output_dir + "/" + output
- dirname = output if to_multiple_files else os.path.dirname(output)
- if len(dirname) > 0 and (not os.path.exists(dirname) or not os.path.isdir(dirname)):
+ if len(dirname) > 0 and (not os.path.exists(dirname) or
+ not os.path.isdir(dirname)):
os.makedirs(dirname)
if not to_multiple_files:
@@ -177,7 +210,7 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files):
prefix_len = len(gen.module_prefix)
assert gen.module_prefix == gen.module_name[0:prefix_len], \
"module_prefix must be prefix of module_name"
- diff_name = gen.module_name[prefix_len+1:]
+ diff_name = gen.module_name[prefix_len + 1:]
if diff_name != "":
header_name = diff_name
else:
@@ -189,17 +222,18 @@ def generate_doc(module_name, module_prefix, output, to_multiple_files):
gen._print_header_(header_name, dot='=', is_title=True)
gen.print_current_module()
else:
- apis = sorted(gen.module.__all__,key=str.lower)
+ apis = sorted(gen.module.__all__, key=str.lower)
for api in apis:
header_name = api
with gen.guard(os.path.join(output, api + '.rst')):
gen.print_header_reminder()
- gen.print_item(api)
+ gen.print_item(api, output_name)
def main():
args = parse_arg()
- generate_doc(args.module_name, args.module_prefix, args.output, args.to_multiple_files)
+ generate_doc(args.module_name, args.module_prefix, args.output,
+ args.output_name, args.to_multiple_files, args.output_dir)
if __name__ == '__main__':
diff --git a/doc/fluid/api/gen_doc.sh b/doc/fluid/api/gen_doc.sh
index 1e833161ef0e225e4725136ad3d466d945b69bec..5284b277e24cf9ea8eeaf79c0aeb86c8fe5f6904 100644
--- a/doc/fluid/api/gen_doc.sh
+++ b/doc/fluid/api/gen_doc.sh
@@ -1,23 +1,44 @@
#!/bin/bash
-#for module in nn
-#do
-# python gen_doc.py --module_name layers.${module} --module_prefix layers --output layers/${module} --to_multiple_files True
-#done
-
-#for module in control_flow nn io ops tensor learning_rate_scheduler detection metric_op
-#do
-# python gen_doc.py --module_name layers.${module} --module_prefix layers --output layers/${module}.rst
-#done
-
-for module in layers dataset clip metrics executor initializer io nets optimizer profiler regularizer transpiler backward profiler unique_name dygraph
+for module in layers dataset clip metrics executor initializer io nets optimizer profiler regularizer transpiler backward unique_name dygraph framework
do
- python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --to_multiple_files True
+ python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name fluid --to_multiple_files True
python gen_module_index.py ${module} fluid.${module}
done
-python gen_doc.py --module_name "" --module_prefix "" --output fluid --to_multiple_files True
+python gen_doc.py --module_name "" --module_prefix "" --output fluid --output_name fluid --to_multiple_files True
python gen_module_index.py fluid fluid
+# tensor
+for module in math random stat linalg search
+do
+ python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name tensor --to_multiple_files True --output_dir tensor
+ python gen_module_index.py tensor.${module} ${module}
+done
+
+python gen_module_index.py tensor paddle.tensor
+
+for module in math manipulation linalg
+do
+ python gen_doc.py --module_name tensor.${module} --module_prefix tensor.${module} --output tensor/${module} --output_name complex --to_multiple_files True --output_dir complex
+ python gen_module_index.py complex.tensor.${module} ${module}
+done
+
+python gen_module_index.py complex.tensor tensor
+python gen_module_index.py complex paddle.complex
+python gen_module_index.py framework paddle.framework
+
+
+# nn
+for module in loss activation
+do
+ python gen_doc.py --module_name ${module} --module_prefix ${module} --output ${module} --output_name nn --to_multiple_files True --output_dir nn
+ python gen_module_index.py nn.${module} ${module}
+done
+
+python gen_doc.py --module_name "" --module_prefix "" --output nn --output_name nn --to_multiple_files True
+python gen_module_index.py nn paddle.nn
+
+# index.rst
python gen_index.py
diff --git a/doc/fluid/api/gen_index.py b/doc/fluid/api/gen_index.py
index d34142b395bb49a35cac273afbe60a571fcdf3c2..4cc7272b03aa0fec3eefe543d7ff7ad791d6e1fd 100644
--- a/doc/fluid/api/gen_index.py
+++ b/doc/fluid/api/gen_index.py
@@ -4,7 +4,7 @@ import glob
import os
if __name__ == '__main__':
- with open('index_en.rst', 'w') as file_object:
+ with open('index_en.rst', 'w') as file_object:
file_object = open('index_en.rst', 'w')
file_object.write('''=============
API Reference
@@ -14,11 +14,10 @@ API Reference
:maxdepth: 1
../api_guides/index_en.rst
- fluid.rst
''')
target_dirs = ['.', 'data']
-
+
file_names = []
for target_dir in target_dirs:
if target_dir == '.':
@@ -28,5 +27,14 @@ API Reference
file_names.extend(glob.glob(pattern))
for file_name in sorted(file_names):
- if file_name not in ['index_en.rst', 'fluid.rst']:
- file_object.write(' '+file_name + "\n")
+ with open(file_name, 'r') as f:
+ for i in range(2):
+ line = f.readline().strip()
+ if line.find('paddle.') != -1:
+ file_object.write(' ' + file_name + "\n")
+ file_names.remove(file_name)
+
+ file_object.write(' ' + 'fluid.rst' + "\n")
+ for file_name in sorted(file_names):
+ if file_name not in ['index_en.rst']:
+ file_object.write(' ' + file_name + "\n")
diff --git a/doc/fluid/api/imperative.rst b/doc/fluid/api/imperative.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f138e06701b138dc109dab2e3b1c17832658d390
--- /dev/null
+++ b/doc/fluid/api/imperative.rst
@@ -0,0 +1,29 @@
+=======================
+paddle.imperative
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ imperative/CosineDecay.rst
+ imperative/DataParallel.rst
+ imperative/declarative.rst
+ imperative/enabled.rst
+ imperative/ExponentialDecay.rst
+ imperative/grad.rst
+ imperative/guard.rst
+ imperative/InverseTimeDecay.rst
+ imperative/jit.rst
+ imperative/load.rst
+ imperative/NaturalExpDecay.rst
+ imperative/no_grad.rst
+ imperative/NoamDecay.rst
+ imperative/ParallelEnv.rst
+ imperative/PiecewiseDecay.rst
+ imperative/PolynomialDecay.rst
+ imperative/prepare_context.rst
+ imperative/ProgramTranslator.rst
+ imperative/save.rst
+ imperative/to_variable.rst
+ imperative/TracedLayer.rst
+ imperative/TranslatedLayer.rst
diff --git a/doc/fluid/api/imperative/CosineDecay.rst b/doc/fluid/api/imperative/CosineDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5082f7224c9e7ef13e8e52f0f58a4689fd0e878e
--- /dev/null
+++ b/doc/fluid/api/imperative/CosineDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_CosineDecay:
+
+CosineDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay
+
+
diff --git a/doc/fluid/api/imperative/DataParallel.rst b/doc/fluid/api/imperative/DataParallel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ee8b0b73be302f847e1a8fef795334e3c063911
--- /dev/null
+++ b/doc/fluid/api/imperative/DataParallel.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_DataParallel:
+
+DataParallel
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.DataParallel
+
+
diff --git a/doc/fluid/api/imperative/ExponentialDecay.rst b/doc/fluid/api/imperative/ExponentialDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..758a162e99ec1d29bd82dd064159e8c3df48558b
--- /dev/null
+++ b/doc/fluid/api/imperative/ExponentialDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_ExponentialDecay:
+
+ExponentialDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay
+
+
diff --git a/doc/fluid/api/imperative/InverseTimeDecay.rst b/doc/fluid/api/imperative/InverseTimeDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0e760981dc587181d6882f2cd53b662dc123b44c
--- /dev/null
+++ b/doc/fluid/api/imperative/InverseTimeDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_InverseTimeDecay:
+
+InverseTimeDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay
+
+
diff --git a/doc/fluid/api/imperative/NaturalExpDecay.rst b/doc/fluid/api/imperative/NaturalExpDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..50ed2fd4a0a6dc0d0b58ff3867ccfcdb718039da
--- /dev/null
+++ b/doc/fluid/api/imperative/NaturalExpDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_NaturalExpDecay:
+
+NaturalExpDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay
+
+
diff --git a/doc/fluid/api/imperative/NoamDecay.rst b/doc/fluid/api/imperative/NoamDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b126e65f1d9dc151d21269119c0e272a2ba7d9bb
--- /dev/null
+++ b/doc/fluid/api/imperative/NoamDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_NoamDecay:
+
+NoamDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay
+
+
diff --git a/doc/fluid/api/imperative/ParallelEnv.rst b/doc/fluid/api/imperative/ParallelEnv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..edfe9fdb960ccb2c9f9487297869e820d900db9d
--- /dev/null
+++ b/doc/fluid/api/imperative/ParallelEnv.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_ParallelEnv:
+
+ParallelEnv
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv
+
+
diff --git a/doc/fluid/api/imperative/PiecewiseDecay.rst b/doc/fluid/api/imperative/PiecewiseDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..438c6c012c547366cae0475b7925da9d46b32970
--- /dev/null
+++ b/doc/fluid/api/imperative/PiecewiseDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_PiecewiseDecay:
+
+PiecewiseDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay
+
+
diff --git a/doc/fluid/api/imperative/PolynomialDecay.rst b/doc/fluid/api/imperative/PolynomialDecay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c4ba271cc5f33b70a945ca15ff7d9847a945ccd1
--- /dev/null
+++ b/doc/fluid/api/imperative/PolynomialDecay.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_PolynomialDecay:
+
+PolynomialDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay
+
+
diff --git a/doc/fluid/api/imperative/ProgramTranslator.rst b/doc/fluid/api/imperative/ProgramTranslator.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b1117d15aafd012bdf7a886ec3d43c3881cf9a0
--- /dev/null
+++ b/doc/fluid/api/imperative/ProgramTranslator.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_ProgramTranslator:
+
+ProgramTranslator
+-------------------------------
+:doc_source: paddle.fluid.dygraph.ProgramTranslator
+
+
diff --git a/doc/fluid/api/imperative/TracedLayer.rst b/doc/fluid/api/imperative/TracedLayer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0d623135fe8f46b7f2bfe48f3b4f9c56c19ab9da
--- /dev/null
+++ b/doc/fluid/api/imperative/TracedLayer.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_TracedLayer:
+
+TracedLayer
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.TracedLayer
+
+
diff --git a/doc/fluid/api/imperative/TranslatedLayer.rst b/doc/fluid/api/imperative/TranslatedLayer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0299a9f57392e267ae015947345249784fd929f5
--- /dev/null
+++ b/doc/fluid/api/imperative/TranslatedLayer.rst
@@ -0,0 +1,5 @@
+.. _api_imperative_TranslatedLayer:
+
+TranslatedLayer
+-------------------------------
+:doc_source: paddle.fluid.dygraph.io.TranslatedLayer
diff --git a/doc/fluid/api/imperative/declarative.rst b/doc/fluid/api/imperative/declarative.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d79e5161282a7344568d9223bb5f5a99fefd44f
--- /dev/null
+++ b/doc/fluid/api/imperative/declarative.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_declarative:
+
+declarative
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.declarative
+
+
diff --git a/doc/fluid/api/imperative/enabled.rst b/doc/fluid/api/imperative/enabled.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7279ce01e99e43e7e4a58f8484fbae582ebf62b0
--- /dev/null
+++ b/doc/fluid/api/imperative/enabled.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_enabled:
+
+enabled
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.enabled
+
+
diff --git a/doc/fluid/api/imperative/grad.rst b/doc/fluid/api/imperative/grad.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e147b53f0304af215ebd98b9b3b225f698a6fa31
--- /dev/null
+++ b/doc/fluid/api/imperative/grad.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_grad:
+
+grad
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.grad
+
+
diff --git a/doc/fluid/api/imperative/guard.rst b/doc/fluid/api/imperative/guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..776b12e7ad95b98784b27e2d31252e01d9e0a557
--- /dev/null
+++ b/doc/fluid/api/imperative/guard.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_guard:
+
+guard
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.guard
+
+
diff --git a/doc/fluid/api/imperative/jit.rst b/doc/fluid/api/imperative/jit.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7853a048535c045bae18f71c8b4d7f1e44cc65eb
--- /dev/null
+++ b/doc/fluid/api/imperative/jit.rst
@@ -0,0 +1,10 @@
+===
+jit
+===
+
+.. toctree::
+ :maxdepth: 1
+
+ jit/save.rst
+ jit/load.rst
+ jit/SaveLoadConfig.rst
diff --git a/doc/fluid/api/imperative/jit/SaveLoadConfig.rst b/doc/fluid/api/imperative/jit/SaveLoadConfig.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cab85776ec33f9cab2dc788ebbb3081fca1d4035
--- /dev/null
+++ b/doc/fluid/api/imperative/jit/SaveLoadConfig.rst
@@ -0,0 +1,5 @@
+.. _api_imperative_jit_SaveLoadConfig:
+
+SaveLoadConfig
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.SaveLoadConfig
diff --git a/doc/fluid/api/imperative/jit/load.rst b/doc/fluid/api/imperative/jit/load.rst
new file mode 100644
index 0000000000000000000000000000000000000000..723a87936a8f26653eb2b34f361aa35a4b3fd74f
--- /dev/null
+++ b/doc/fluid/api/imperative/jit/load.rst
@@ -0,0 +1,5 @@
+.. _api_imperative_jit_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.load
diff --git a/doc/fluid/api/imperative/jit/save.rst b/doc/fluid/api/imperative/jit/save.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b809a99166e35edd65af253dffe40053776a68dc
--- /dev/null
+++ b/doc/fluid/api/imperative/jit/save.rst
@@ -0,0 +1,5 @@
+.. _api_imperative_jit_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.save
diff --git a/doc/fluid/api/imperative/load.rst b/doc/fluid/api/imperative/load.rst
new file mode 100644
index 0000000000000000000000000000000000000000..819117f0388e32e1893d3827b70ce88780340644
--- /dev/null
+++ b/doc/fluid/api/imperative/load.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.dygraph.checkpoint.load_dygraph
+
+
diff --git a/doc/fluid/api/imperative/no_grad.rst b/doc/fluid/api/imperative/no_grad.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0a737ec558cf6d96d6d6204046164871f0138f9d
--- /dev/null
+++ b/doc/fluid/api/imperative/no_grad.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_no_grad:
+
+no_grad
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.no_grad
+
+
diff --git a/doc/fluid/api/imperative/prepare_context.rst b/doc/fluid/api/imperative/prepare_context.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33c89846e1725cb70cd170f08223318e583dc531
--- /dev/null
+++ b/doc/fluid/api/imperative/prepare_context.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_prepare_context:
+
+prepare_context
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.prepare_context
+
+
diff --git a/doc/fluid/api/imperative/save.rst b/doc/fluid/api/imperative/save.rst
new file mode 100644
index 0000000000000000000000000000000000000000..90353ff25557fafe4c50959289867df3dd39b54c
--- /dev/null
+++ b/doc/fluid/api/imperative/save.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.dygraph.checkpoint.save_dygraph
+
+
diff --git a/doc/fluid/api/imperative/to_variable.rst b/doc/fluid/api/imperative/to_variable.rst
new file mode 100644
index 0000000000000000000000000000000000000000..21bb639afc7e6f13facece0cb38680ae3735fedb
--- /dev/null
+++ b/doc/fluid/api/imperative/to_variable.rst
@@ -0,0 +1,7 @@
+.. _api_imperative_to_variable:
+
+to_variable
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.to_variable
+
+
diff --git a/doc/fluid/api/index_en.rst b/doc/fluid/api/index_en.rst
index dce0d7cf9679ca2b47fd2a2713c31d5538b6a61c..e3ac0dc69c3487f301cb42c6053cf2f4bd82ccdc 100644
--- a/doc/fluid/api/index_en.rst
+++ b/doc/fluid/api/index_en.rst
@@ -6,21 +6,31 @@ API Reference
:maxdepth: 1
../api_guides/index_en.rst
+ dataset.rst
+ declarative.rst
+ distributed.rst
+ framework.rst
+ imperative.rst
+ io.rst
+ metric.rst
+ nn.rst
+ optimizer.rst
+ static.rst
+ tensor.rst
fluid.rst
backward.rst
clip.rst
data/data_reader.rst
data/dataset.rst
- dataset.rst
dygraph.rst
executor.rst
initializer.rst
- io.rst
layers.rst
metrics.rst
nets.rst
- optimizer.rst
+ paddle.rst
profiler.rst
regularizer.rst
transpiler.rst
unique_name.rst
+ review_tmp.rst
diff --git a/doc/fluid/api/io.rst b/doc/fluid/api/io.rst
index db67b4f7c9e7d51326ea92f4086739f0b2e0bdb2..666d584ca533027ddeb3f1a16575365804260b81 100644
--- a/doc/fluid/api/io.rst
+++ b/doc/fluid/api/io.rst
@@ -1,17 +1,19 @@
-========
-fluid.io
-========
+=========
+paddle.io
+=========
.. toctree::
:maxdepth: 1
io/batch.rst
+ io/BatchSampler.rst
io/buffered.rst
io/cache.rst
io/chain.rst
io/compose.rst
io/ComposeNotAligned.rst
io/DataLoader.rst
+ io/Dataset.rst
io/firstn.rst
io/get_program_parameter.rst
io/get_program_persistable_vars.rst
diff --git a/doc/fluid/api/io/BatchSampler.rst b/doc/fluid/api/io/BatchSampler.rst
new file mode 100644
index 0000000000000000000000000000000000000000..91b872bec319a6c9e4c2f7031448b73afbab5a9c
--- /dev/null
+++ b/doc/fluid/api/io/BatchSampler.rst
@@ -0,0 +1,7 @@
+.. _api_io_BatchSampler:
+
+BatchSampler
+-------------------------------
+:doc_source: paddle.fluid.dataloader.BatchSampler
+
+
diff --git a/doc/fluid/api/io/ComposeNotAligned.rst b/doc/fluid/api/io/ComposeNotAligned.rst
index c2f3f465b7dd287e22c5bdcf2401c40d5494d4f1..3968d80ce3cc5c174763c7b6161c80e3c3840042 100644
--- a/doc/fluid/api/io/ComposeNotAligned.rst
+++ b/doc/fluid/api/io/ComposeNotAligned.rst
@@ -11,4 +11,3 @@ ComposeNotAligned
:inherited-members:
:noindex:
-This indicates an error state of compose API, which will raise when outputs of readers are not aligned.
diff --git a/doc/fluid/api/io/Dataset.rst b/doc/fluid/api/io/Dataset.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b86e0a377ff44d7c8c231a1f5af317cf904d3a3e
--- /dev/null
+++ b/doc/fluid/api/io/Dataset.rst
@@ -0,0 +1,7 @@
+.. _api_io_Dataset:
+
+Dataset
+-------------------------------
+:doc_source: paddle.fluid.dataloader.Dataset
+
+
diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst
index 5c45ac4bdcbf740089a69c4d112fcba63bf10425..0f1fe3c222c5266deacca8603ff10ce9fed33429 100644
--- a/doc/fluid/api/layers.rst
+++ b/doc/fluid/api/layers.rst
@@ -25,6 +25,7 @@ fluid.layers
layers/atan.rst
layers/auc.rst
layers/autoincreased_step_counter.rst
+ layers/BasicDecoder.rst
layers/batch_norm.rst
layers/beam_search.rst
layers/beam_search_decode.rst
@@ -68,6 +69,7 @@ fluid.layers
layers/cumsum.rst
layers/data.rst
layers/data_norm.rst
+ layers/DecodeHelper.rst
layers/Decoder.rst
layers/deformable_conv.rst
layers/deformable_roi_pooling.rst
@@ -121,6 +123,7 @@ fluid.layers
layers/get_tensor_from_selected_rows.rst
layers/greater_equal.rst
layers/greater_than.rst
+ layers/GreedyEmbeddingHelper.rst
layers/grid_sampler.rst
layers/group_norm.rst
layers/gru_unit.rst
@@ -138,6 +141,7 @@ fluid.layers
layers/image_resize.rst
layers/image_resize_short.rst
layers/increment.rst
+ layers/inplace_abn.rst
layers/instance_norm.rst
layers/inverse_time_decay.rst
layers/iou_similarity.rst
@@ -178,6 +182,7 @@ fluid.layers
layers/mul.rst
layers/multi_box_head.rst
layers/multiclass_nms.rst
+ layers/matrix_nms.rst
layers/multiplex.rst
layers/MultivariateNormalDiag.rst
layers/natural_exp_decay.rst
@@ -239,6 +244,7 @@ fluid.layers
layers/rpn_target_assign.rst
layers/rsqrt.rst
layers/sampled_softmax_with_cross_entropy.rst
+ layers/SampleEmbeddingHelper.rst
layers/sampling_id.rst
layers/scale.rst
layers/scatter.rst
@@ -304,6 +310,7 @@ fluid.layers
layers/tensor_array_to_tensor.rst
layers/thresholded_relu.rst
layers/topk.rst
+ layers/TrainingHelper.rst
layers/transpose.rst
layers/unfold.rst
layers/Uniform.rst
diff --git a/doc/fluid/api/layers/BasicDecoder.rst b/doc/fluid/api/layers/BasicDecoder.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8eb0f78dc8621d42061dfe944106c64007a0d0c1
--- /dev/null
+++ b/doc/fluid/api/layers/BasicDecoder.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_BasicDecoder:
+
+BasicDecoder
+------------
+
+.. autoclass:: paddle.fluid.layers.BasicDecoder
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/layers/DecodeHelper.rst b/doc/fluid/api/layers/DecodeHelper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ba475f2a1daea0e842d07d0372de2a828de2931a
--- /dev/null
+++ b/doc/fluid/api/layers/DecodeHelper.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_DecodeHelper:
+
+DecodeHelper
+------------
+
+.. autoclass:: paddle.fluid.layers.DecodeHelper
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/layers/GreedyEmbeddingHelper.rst b/doc/fluid/api/layers/GreedyEmbeddingHelper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb7741ec499fcff9404d384b760c03613c63a327
--- /dev/null
+++ b/doc/fluid/api/layers/GreedyEmbeddingHelper.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_GreedyEmbeddingHelper:
+
+GreedyEmbeddingHelper
+---------------------
+
+.. autoclass:: paddle.fluid.layers.GreedyEmbeddingHelper
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/layers/SampleEmbeddingHelper.rst b/doc/fluid/api/layers/SampleEmbeddingHelper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..99b9ca39900643e5c5c5806106c66a151fdf9a27
--- /dev/null
+++ b/doc/fluid/api/layers/SampleEmbeddingHelper.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_SampleEmbeddingHelper:
+
+SampleEmbeddingHelper
+---------------------
+
+.. autoclass:: paddle.fluid.layers.SampleEmbeddingHelper
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/layers/TrainingHelper.rst b/doc/fluid/api/layers/TrainingHelper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..247ac73d1f15cc9413ec60e0dd4d7d9047e308de
--- /dev/null
+++ b/doc/fluid/api/layers/TrainingHelper.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_TrainingHelper:
+
+TrainingHelper
+--------------
+
+.. autoclass:: paddle.fluid.layers.TrainingHelper
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/layers/inplace_abn.rst b/doc/fluid/api/layers/inplace_abn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b3b31942f37c7cb43a5d95f1d6965acaf08efdca
--- /dev/null
+++ b/doc/fluid/api/layers/inplace_abn.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_inplace_abn:
+
+inplace_abn
+-----------
+
+.. autofunction:: paddle.fluid.layers.inplace_abn
+ :noindex:
+
diff --git a/doc/fluid/api/layers/matrix_nms.rst b/doc/fluid/api/layers/matrix_nms.rst
new file mode 100644
index 0000000000000000000000000000000000000000..60bbbeb151bdd87861c37b625139988ce7db9467
--- /dev/null
+++ b/doc/fluid/api/layers/matrix_nms.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_fluid_layers_matrix_nms:
+
+matrix_nms
+----------
+
+.. autofunction:: paddle.fluid.layers.matrix_nms
+ :noindex:
+
diff --git a/doc/fluid/api/metric.rst b/doc/fluid/api/metric.rst
new file mode 100644
index 0000000000000000000000000000000000000000..80dc846d48298d9c735dd242e706b69b3a0b4a44
--- /dev/null
+++ b/doc/fluid/api/metric.rst
@@ -0,0 +1,20 @@
+=======================
+paddle.metric
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ metric/Accuracy.rst
+ metric/accuracy.rst
+ metric/Auc.rst
+ metric/auc.rst
+ metric/chunk_eval.rst
+ metric/ChunkEvaluator.rst
+ metric/CompositeMetric.rst
+ metric/cos_sim.rst
+ metric/DetectionMAP.rst
+ metric/EditDistance.rst
+ metric/mean_iou.rst
+ metric/Precision.rst
+ metric/Recall.rst
diff --git a/doc/fluid/api/metric/ChunkEvaluator.rst b/doc/fluid/api/metric/ChunkEvaluator.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0df999058475a86bf32673b784185bb678063e71
--- /dev/null
+++ b/doc/fluid/api/metric/ChunkEvaluator.rst
@@ -0,0 +1,7 @@
+.. _api_metric_ChunkEvaluator:
+
+ChunkEvaluator
+-------------------------------
+:doc_source: paddle.fluid.metrics.ChunkEvaluator
+
+
diff --git a/doc/fluid/api/metric/CompositeMetric.rst b/doc/fluid/api/metric/CompositeMetric.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a752c376c00d17b526145196ba1ca63f7cd65ad
--- /dev/null
+++ b/doc/fluid/api/metric/CompositeMetric.rst
@@ -0,0 +1,7 @@
+.. _api_metric_CompositeMetric:
+
+CompositeMetric
+-------------------------------
+:doc_source: paddle.fluid.metrics.CompositeMetric
+
+
diff --git a/doc/fluid/api/metric/DetectionMAP.rst b/doc/fluid/api/metric/DetectionMAP.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a1175150c8785242ad997c4794711fbc945e89a3
--- /dev/null
+++ b/doc/fluid/api/metric/DetectionMAP.rst
@@ -0,0 +1,7 @@
+.. _api_metric_DetectionMAP:
+
+DetectionMAP
+-------------------------------
+:doc_source: paddle.fluid.metrics.DetectionMAP
+
+
diff --git a/doc/fluid/api/metric/EditDistance.rst b/doc/fluid/api/metric/EditDistance.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b8902e4e8ebfb51385ae518c5e119aaf07f0afab
--- /dev/null
+++ b/doc/fluid/api/metric/EditDistance.rst
@@ -0,0 +1,7 @@
+.. _api_metric_EditDistance:
+
+EditDistance
+-------------------------------
+:doc_source: paddle.fluid.metrics.EditDistance
+
+
diff --git a/doc/fluid/api/metric/Precision.rst b/doc/fluid/api/metric/Precision.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fcbb3ccda0f4ecfb0ae068bed8e004b094768435
--- /dev/null
+++ b/doc/fluid/api/metric/Precision.rst
@@ -0,0 +1,7 @@
+.. _api_metric_Precision:
+
+Precision
+-------------------------------
+:doc_source: paddle.fluid.metrics.Precision
+
+
diff --git a/doc/fluid/api/metric/Recall.rst b/doc/fluid/api/metric/Recall.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f28b47824166c3137d035526c7ae3c2d08beaaf
--- /dev/null
+++ b/doc/fluid/api/metric/Recall.rst
@@ -0,0 +1,7 @@
+.. _api_metric_Recall:
+
+Recall
+-------------------------------
+:doc_source: paddle.fluid.metrics.Recall
+
+
diff --git a/doc/fluid/api/metric/accuracy.rst b/doc/fluid/api/metric/accuracy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d2395456b67127d758792c2c9a685c30d369117
--- /dev/null
+++ b/doc/fluid/api/metric/accuracy.rst
@@ -0,0 +1,7 @@
+.. _api_metric_accuracy:
+
+accuracy
+-------------------------------
+:doc_source: paddle.fluid.layers.metric_op.accuracy
+
+
diff --git a/doc/fluid/api/metric/auc.rst b/doc/fluid/api/metric/auc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..475751ed33ca4ffe4f908fbde115b7b5444d4990
--- /dev/null
+++ b/doc/fluid/api/metric/auc.rst
@@ -0,0 +1,7 @@
+.. _api_metric_auc:
+
+auc
+-------------------------------
+:doc_source: paddle.fluid.layers.metric_op.auc
+
+
diff --git a/doc/fluid/api/metric/chunk_eval.rst b/doc/fluid/api/metric/chunk_eval.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3a8a19c9ac350820e58813e284aee34d9788c385
--- /dev/null
+++ b/doc/fluid/api/metric/chunk_eval.rst
@@ -0,0 +1,7 @@
+.. _api_metric_chunk_eval:
+
+chunk_eval
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.chunk_eval
+
+
diff --git a/doc/fluid/api/metric/cos_sim.rst b/doc/fluid/api/metric/cos_sim.rst
new file mode 100644
index 0000000000000000000000000000000000000000..44f9e1b3561f33ce3a0276fe4751602438e36233
--- /dev/null
+++ b/doc/fluid/api/metric/cos_sim.rst
@@ -0,0 +1,7 @@
+.. _api_metric_cos_sim:
+
+cos_sim
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.cos_sim
+
+
diff --git a/doc/fluid/api/metric/mean_iou.rst b/doc/fluid/api/metric/mean_iou.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cff10d56ee46bb209f1288b589c4f448de2f4557
--- /dev/null
+++ b/doc/fluid/api/metric/mean_iou.rst
@@ -0,0 +1,7 @@
+.. _api_metric_mean_iou:
+
+mean_iou
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.mean_iou
+
+
diff --git a/doc/fluid/api/nn.rst b/doc/fluid/api/nn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ef50a1e01e4a900edcc8a1eb073144477b08d584
--- /dev/null
+++ b/doc/fluid/api/nn.rst
@@ -0,0 +1,175 @@
+=========
+paddle.nn
+=========
+
+.. toctree::
+ :maxdepth: 1
+
+ nn/activation.rst
+ nn/adaptive_pool2d.rst
+ nn/adaptive_pool3d.rst
+ nn/add_position_encoding.rst
+ nn/affine_channel.rst
+ nn/affine_grid.rst
+ nn/anchor_generator.rst
+ nn/assign.rst
+ nn/BatchNorm.rst
+ nn/beam_search.rst
+ nn/beam_search_decode.rst
+ nn/BilinearTensorProduct.rst
+ nn/bipartite_match.rst
+ nn/box_clip.rst
+ nn/box_coder.rst
+ nn/box_decoder_and_assign.rst
+ nn/bpr_loss.rst
+ nn/brelu.rst
+ nn/case.rst
+ nn/center_loss.rst
+ nn/clip.rst
+ nn/clip_by_norm.rst
+ nn/collect_fpn_proposals.rst
+ nn/cond.rst
+ nn/ConstantPad1d.rst
+ nn/ConstantPad2d.rst
+ nn/ConstantPad3d.rst
+ nn/continuous_value_model.rst
+ nn/Conv2d.rst
+ nn/Conv3d.rst
+ nn/ConvTranspose2d.rst
+ nn/ConvTranspose3d.rst
+ nn/cosine_decay.rst
+ nn/cosine_similarity.rst
+ nn/CosineSimilarity.rst
+ nn/cross_entropy.rst
+ nn/data.rst
+ nn/deformable_roi_pooling.rst
+ nn/density_prior_box.rst
+ nn/detection_output.rst
+ nn/dice_loss.rst
+ nn/distribute_fpn_proposals.rst
+ nn/dropout.rst
+ nn/edit_distance.rst
+ nn/elu.rst
+ nn/Embedding.rst
+ nn/erf.rst
+ nn/exponential_decay.rst
+ nn/filter_by_instag.rst
+ nn/fsp_matrix.rst
+ nn/functional.rst
+ nn/gather_tree.rst
+ nn/gelu.rst
+ nn/generate_mask_labels.rst
+ nn/generate_proposal_labels.rst
+ nn/generate_proposals.rst
+ nn/GradientClipByGlobalNorm.rst
+ nn/GradientClipByNorm.rst
+ nn/GradientClipByValue.rst
+ nn/grid_sampler.rst
+ nn/GroupNorm.rst
+ nn/hardshrink.rst
+ nn/hardtanh.rst
+ nn/hard_sigmoid.rst
+ nn/hard_swish.rst
+ nn/hash.rst
+ nn/hsigmoid.rst
+ nn/huber_loss.rst
+ nn/image_resize.rst
+ nn/image_resize_short.rst
+ nn/initializer.rst
+ nn/inverse_time_decay.rst
+ nn/iou_similarity.rst
+ nn/kldiv_loss.rst
+ nn/l2_normalize.rst
+ nn/label_smooth.rst
+ nn/Layer.rst
+ nn/LayerList.rst
+ nn/LayerNorm.rst
+ nn/leaky_relu.rst
+ nn/Linear.rst
+ nn/linear_lr_warmup.rst
+ nn/log_loss.rst
+ nn/log_sigmoid.rst
+ nn/log_softmax.rst
+ nn/loss.rst
+ nn/lrn.rst
+ nn/margin_rank_loss.rst
+ nn/matrix_nms.rst
+ nn/maxout.rst
+ nn/mse_loss.rst
+ nn/multiclass_nms.rst
+ nn/natural_exp_decay.rst
+ nn/noam_decay.rst
+ nn/npair_loss.rst
+ nn/one_hot.rst
+ nn/pad.rst
+ nn/pad2d.rst
+ nn/pad_constant_like.rst
+ nn/ParameterList.rst
+ nn/piecewise_decay.rst
+ nn/pixel_shuffle.rst
+ nn/polygon_box_transform.rst
+ nn/polynomial_decay.rst
+ nn/Pool2D.rst
+ nn/pool3d.rst
+ nn/prelu.rst
+ nn/prior_box.rst
+ nn/prroi_pool.rst
+ nn/psroi_pool.rst
+ nn/random_crop.rst
+ nn/rank_loss.rst
+ nn/ReflectionPad1d.rst
+ nn/ReflectionPad2d.rst
+ nn/ReLU.rst
+ nn/relu.rst
+ nn/relu6.rst
+ nn/ReplicationPad1d.rst
+ nn/ReplicationPad2d.rst
+ nn/ReplicationPad3d.rst
+ nn/resize_bilinear.rst
+ nn/resize_nearest.rst
+ nn/resize_trilinear.rst
+ nn/retinanet_detection_output.rst
+ nn/retinanet_target_assign.rst
+ nn/roi_align.rst
+ nn/roi_perspective_transform.rst
+ nn/roi_pool.rst
+ nn/row_conv.rst
+ nn/rpn_target_assign.rst
+ nn/sampled_softmax_with_cross_entropy.rst
+ nn/selu.rst
+ nn/Sequential.rst
+ nn/shuffle_channel.rst
+ nn/sigmoid_cross_entropy_with_logits.rst
+ nn/sigmoid_focal_loss.rst
+ nn/similarity_focus.rst
+ nn/smooth_l1.rst
+ nn/soft_relu.rst
+ nn/softmax.rst
+ nn/softmax_with_cross_entropy.rst
+ nn/softplus.rst
+ nn/softshrink.rst
+ nn/softsign.rst
+ nn/space_to_depth.rst
+ nn/SpectralNorm.rst
+ nn/square_error_cost.rst
+ nn/ssd_loss.rst
+ nn/swish.rst
+ nn/switch_case.rst
+ nn/tanhshrink.rst
+ nn/target_assign.rst
+ nn/teacher_student_sigmoid_loss.rst
+ nn/temporal_shift.rst
+ nn/thresholded_relu.rst
+ nn/unfold.rst
+ nn/warpctc.rst
+ nn/while_loop.rst
+ nn/yolo_box.rst
+ nn/yolov3_loss.rst
+ nn/functional/loss/margin_ranking_loss.rst
+ nn/functional/activation/sigmoid.rst
+ nn/layer/loss/MarginRankingLoss.rst
+ nn/ZeroPad2d.rst
+ nn/AdaptiveAvgPool2d.rst
+ nn/AdaptiveAvgPool3d.rst
+ nn/layer/activation/Sigmoid.rst
+ nn/Bilinear.rst
\ No newline at end of file
diff --git a/doc/fluid/api/nn/AdaptiveAvgPool2d.rst b/doc/fluid/api/nn/AdaptiveAvgPool2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..26518d01d0e432d262e4434ced097fbf12e4a117
--- /dev/null
+++ b/doc/fluid/api/nn/AdaptiveAvgPool2d.rst
@@ -0,0 +1,10 @@
+.. _api_nn_pooling_AdaptiveAvgPool2d:
+
+AdaptiveAvgPool2d
+-----------------
+
+.. autoclass:: paddle.nn.AdaptiveAvgPool2d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/AdaptiveAvgPool3d.rst b/doc/fluid/api/nn/AdaptiveAvgPool3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..70f9e87f2b59bfbf7f754e4a615c49fa85fb6b4f
--- /dev/null
+++ b/doc/fluid/api/nn/AdaptiveAvgPool3d.rst
@@ -0,0 +1,10 @@
+.. _api_nn_pooling_AdaptiveAvgPool3d:
+
+AdaptiveAvgPool3d
+-----------------
+
+.. autoclass:: paddle.nn.AdaptiveAvgPool3d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/BatchNorm.rst b/doc/fluid/api/nn/BatchNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b88ccdfa124f2bb0fab620878c61eb62ba22cbe0
--- /dev/null
+++ b/doc/fluid/api/nn/BatchNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_BatchNorm:
+
+BatchNorm
+-------------------------------
+:doc_source: paddle.fluid.dygraph.BatchNorm
+
+
diff --git a/doc/fluid/api/nn/Bilinear.rst b/doc/fluid/api/nn/Bilinear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cbd2847b06f01a779e930c042b614219714b89a7
--- /dev/null
+++ b/doc/fluid/api/nn/Bilinear.rst
@@ -0,0 +1,8 @@
+.. _api_nn_Bilinear:
+
+Bilinear
+-------------------------------
+
+.. autoclass:: paddle.nn.Bilinear
+ :noindex:
+
diff --git a/doc/fluid/api/nn/BilinearTensorProduct.rst b/doc/fluid/api/nn/BilinearTensorProduct.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca0a1245228168f0f036ab0b7f0aff7819928870
--- /dev/null
+++ b/doc/fluid/api/nn/BilinearTensorProduct.rst
@@ -0,0 +1,7 @@
+.. _api_nn_BilinearTensorProduct:
+
+BilinearTensorProduct
+-------------------------------
+:doc_source: paddle.fluid.dygraph.BilinearTensorProduct
+
+
diff --git a/doc/fluid/api/nn/ConstantPad1d.rst b/doc/fluid/api/nn/ConstantPad1d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c50dceea85368677f9b7655f1e086266afc87746
--- /dev/null
+++ b/doc/fluid/api/nn/ConstantPad1d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ConstantPad1d:
+
+ConstantPad1d
+-------------------------------
+:doc_source: paddle.nn.ConstantPad1d
+
+
diff --git a/doc/fluid/api/nn/ConstantPad2d.rst b/doc/fluid/api/nn/ConstantPad2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a1b614c7e5e9f2521cf74f5a9e5641632f9f1e86
--- /dev/null
+++ b/doc/fluid/api/nn/ConstantPad2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ConstantPad2d:
+
+ConstantPad2d
+-------------------------------
+:doc_source: paddle.nn.ConstantPad2d
+
+
diff --git a/doc/fluid/api/nn/ConstantPad3d.rst b/doc/fluid/api/nn/ConstantPad3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..65bf1aad222361d749893a73882dde6005803290
--- /dev/null
+++ b/doc/fluid/api/nn/ConstantPad3d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ConstantPad3d:
+
+ConstantPad3d
+-------------------------------
+:doc_source: paddle.nn.ConstantPad3d
+
+
diff --git a/doc/fluid/api/nn/Conv2d.rst b/doc/fluid/api/nn/Conv2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..239d4440f1463028830fe7678a04fe909d4392ce
--- /dev/null
+++ b/doc/fluid/api/nn/Conv2d.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_conv_Conv2d:
+
+Conv2d
+------
+
+.. autoclass:: paddle.nn.layer.conv.Conv2d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/Conv3d.rst b/doc/fluid/api/nn/Conv3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23a878b43d5ea9fbc0d8ca1e0f9a6cad806782db
--- /dev/null
+++ b/doc/fluid/api/nn/Conv3d.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_conv_Conv3d:
+
+Conv3d
+------
+
+.. autoclass:: paddle.nn.layer.conv.Conv3d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/ConvTranspose2d.rst b/doc/fluid/api/nn/ConvTranspose2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c1198338dea65925b1097cde244e9b22b3d618c
--- /dev/null
+++ b/doc/fluid/api/nn/ConvTranspose2d.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_conv_ConvTranspose2d:
+
+ConvTranspose2d
+---------------
+
+.. autoclass:: paddle.nn.layer.conv.ConvTranspose2d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/ConvTranspose3d.rst b/doc/fluid/api/nn/ConvTranspose3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d0345a4f3498ced130156c0380393df6c3fb85d
--- /dev/null
+++ b/doc/fluid/api/nn/ConvTranspose3d.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_conv_ConvTranspose3d:
+
+ConvTranspose3d
+---------------
+
+.. autoclass:: paddle.nn.layer.conv.ConvTranspose3d
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/CosineSimilarity.rst b/doc/fluid/api/nn/CosineSimilarity.rst
new file mode 100644
index 0000000000000000000000000000000000000000..36e235f0752ed84dce258dae4e3248e1173cad8a
--- /dev/null
+++ b/doc/fluid/api/nn/CosineSimilarity.rst
@@ -0,0 +1,7 @@
+.. _api_nn_CosineSimilarity:
+
+CosineSimilarity
+-------------------------------
+:doc_source: paddle.nn.CosineSimilarity
+
+
diff --git a/doc/fluid/api/nn/Embedding.rst b/doc/fluid/api/nn/Embedding.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b9ec790b5deb9232eac98d57896b7f730ab297da
--- /dev/null
+++ b/doc/fluid/api/nn/Embedding.rst
@@ -0,0 +1,7 @@
+.. _api_nn_Embedding:
+
+Embedding
+-------------------------------
+:doc_source: paddle.fluid.dygraph.Embedding
+
+
diff --git a/doc/fluid/api/nn/GradientClipByGlobalNorm.rst b/doc/fluid/api/nn/GradientClipByGlobalNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c414d9e4135f8e1f26e886935acea806eb43e3a
--- /dev/null
+++ b/doc/fluid/api/nn/GradientClipByGlobalNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_GradientClipByGlobalNorm:
+
+GradientClipByGlobalNorm
+-------------------------------
+:doc_source: paddle.fluid.clip.GradientClipByGlobalNorm
+
+
diff --git a/doc/fluid/api/nn/GradientClipByNorm.rst b/doc/fluid/api/nn/GradientClipByNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cf514e3e3c651815b3709ce937d66d77cef730f0
--- /dev/null
+++ b/doc/fluid/api/nn/GradientClipByNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_GradientClipByNorm:
+
+GradientClipByNorm
+-------------------------------
+:doc_source: paddle.fluid.clip.GradientClipByNorm
+
+
diff --git a/doc/fluid/api/nn/GradientClipByValue.rst b/doc/fluid/api/nn/GradientClipByValue.rst
new file mode 100644
index 0000000000000000000000000000000000000000..52d86f6d05072ef49e0e682fee32c0193fdaada7
--- /dev/null
+++ b/doc/fluid/api/nn/GradientClipByValue.rst
@@ -0,0 +1,7 @@
+.. _api_nn_GradientClipByValue:
+
+GradientClipByValue
+-------------------------------
+:doc_source: paddle.fluid.clip.GradientClipByValue
+
+
diff --git a/doc/fluid/api/nn/GroupNorm.rst b/doc/fluid/api/nn/GroupNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c87d6b16b9d0b02fe0833198599fa6024f8b0a9
--- /dev/null
+++ b/doc/fluid/api/nn/GroupNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_GroupNorm:
+
+GroupNorm
+-------------------------------
+:doc_source: paddle.fluid.dygraph.GroupNorm
+
+
diff --git a/doc/fluid/api/nn/Layer.rst b/doc/fluid/api/nn/Layer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..326cbc4098aaed53991e5cb5851090d5d9e5c041
--- /dev/null
+++ b/doc/fluid/api/nn/Layer.rst
@@ -0,0 +1,7 @@
+.. _api_nn_Layer:
+
+Layer
+-------------------------------
+:doc_source: paddle.fluid.dygraph.layers.Layer
+
+
diff --git a/doc/fluid/api/nn/LayerList.rst b/doc/fluid/api/nn/LayerList.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1589900fd7c9c0cf7e2dedf622be3d3eac7722b1
--- /dev/null
+++ b/doc/fluid/api/nn/LayerList.rst
@@ -0,0 +1,7 @@
+.. _api_nn_LayerList:
+
+LayerList
+-------------------------------
+:doc_source: paddle.fluid.dygraph.container.LayerList
+
+
diff --git a/doc/fluid/api/nn/LayerNorm.rst b/doc/fluid/api/nn/LayerNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..162d53ad754cbaaa4c96430309fc30f1c6820fdc
--- /dev/null
+++ b/doc/fluid/api/nn/LayerNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_LayerNorm:
+
+LayerNorm
+-------------------------------
+:doc_source: paddle.fluid.dygraph.LayerNorm
+
+
diff --git a/doc/fluid/api/nn/Linear.rst b/doc/fluid/api/nn/Linear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..239d4d2d22d4558aef5cb1fa7fb993e156871596
--- /dev/null
+++ b/doc/fluid/api/nn/Linear.rst
@@ -0,0 +1,7 @@
+.. _api_nn_Linear:
+
+Linear
+-------------------------------
+:doc_source: paddle.fluid.dygraph.Linear
+
+
diff --git a/doc/fluid/api/nn/ParameterList.rst b/doc/fluid/api/nn/ParameterList.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b278270dc5b4bb8fcf7248a3c58e5210ac26e219
--- /dev/null
+++ b/doc/fluid/api/nn/ParameterList.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ParameterList:
+
+ParameterList
+-------------------------------
+:doc_source: paddle.fluid.dygraph.container.ParameterList
+
+
diff --git a/doc/fluid/api/nn/Pool2D.rst b/doc/fluid/api/nn/Pool2D.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e34535daab0f95181dea2841a9ffb7c5732b7a50
--- /dev/null
+++ b/doc/fluid/api/nn/Pool2D.rst
@@ -0,0 +1,7 @@
+.. _api_nn_Pool2D:
+
+Pool2D
+-------------------------------
+:doc_source: paddle.fluid.dygraph.Pool2D
+
+
diff --git a/doc/fluid/api/nn/ReLU.rst b/doc/fluid/api/nn/ReLU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b186a2b174d77ad0fc635d169fa065b659358c99
--- /dev/null
+++ b/doc/fluid/api/nn/ReLU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReLU:
+
+ReLU
+-------------------------------
+:doc_source: paddle.fluid.layers.relu
+
+
diff --git a/doc/fluid/api/nn/ReflectionPad1d.rst b/doc/fluid/api/nn/ReflectionPad1d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..48998894854e59228aa26de7ec1bdbb8c3e98ec4
--- /dev/null
+++ b/doc/fluid/api/nn/ReflectionPad1d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReflectionPad1d:
+
+ReflectionPad1d
+-------------------------------
+:doc_source: paddle.nn.ReflectionPad1d
+
+
diff --git a/doc/fluid/api/nn/ReflectionPad2d.rst b/doc/fluid/api/nn/ReflectionPad2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..50ade0a1acd71415d99839addbf85f271a4dc854
--- /dev/null
+++ b/doc/fluid/api/nn/ReflectionPad2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReflectionPad2d:
+
+ReflectionPad2d
+-------------------------------
+:doc_source: paddle.nn.ReflectionPad2d
+
+
diff --git a/doc/fluid/api/nn/ReplicationPad1d.rst b/doc/fluid/api/nn/ReplicationPad1d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..da4c6217ac8a94d30b1e677732d27847bacd86cd
--- /dev/null
+++ b/doc/fluid/api/nn/ReplicationPad1d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReplicationPad1d:
+
+ReplicationPad1d
+-------------------------------
+:doc_source: paddle.nn.ReplicationPad1d
+
+
diff --git a/doc/fluid/api/nn/ReplicationPad2d.rst b/doc/fluid/api/nn/ReplicationPad2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b388ba3b97290b8bccdbcf49621ae16170909694
--- /dev/null
+++ b/doc/fluid/api/nn/ReplicationPad2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReplicationPad2d:
+
+ReplicationPad2d
+-------------------------------
+:doc_source: paddle.nn.ReplicationPad2d
+
+
diff --git a/doc/fluid/api/nn/ReplicationPad3d.rst b/doc/fluid/api/nn/ReplicationPad3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9faba1ccda736bb2b8593e5a84593b71a88f365b
--- /dev/null
+++ b/doc/fluid/api/nn/ReplicationPad3d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReplicationPad3d:
+
+ReplicationPad3d
+-------------------------------
+:doc_source: paddle.nn.ReplicationPad3d
+
+
diff --git a/doc/fluid/api/nn/Sequential.rst b/doc/fluid/api/nn/Sequential.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f12fe92b7898310608fa2b3443d975eb59fd001
--- /dev/null
+++ b/doc/fluid/api/nn/Sequential.rst
@@ -0,0 +1,7 @@
+.. _api_nn_Sequential:
+
+Sequential
+-------------------------------
+:doc_source: paddle.fluid.dygraph.container.Sequential
+
+
diff --git a/doc/fluid/api/nn/SpectralNorm.rst b/doc/fluid/api/nn/SpectralNorm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aff8682c35ae6b0f57bc7dff861bb21f7881fd64
--- /dev/null
+++ b/doc/fluid/api/nn/SpectralNorm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_SpectralNorm:
+
+SpectralNorm
+-------------------------------
+:doc_source: paddle.fluid.dygraph.SpectralNorm
+
+
diff --git a/doc/fluid/api/nn/ZeroPad2d.rst b/doc/fluid/api/nn/ZeroPad2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b71c19f98021f7cdefa4ff44425434bbbd55252
--- /dev/null
+++ b/doc/fluid/api/nn/ZeroPad2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ZeroPad2d:
+
+ZeroPad2d
+-------------------------------
+:doc_source: paddle.nn.ZeroPad2d
+
+
diff --git a/doc/fluid/api/nn/activation.rst b/doc/fluid/api/nn/activation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5e1d1be87c05e0c533311573cec81ca980f88b92
--- /dev/null
+++ b/doc/fluid/api/nn/activation.rst
@@ -0,0 +1,22 @@
+==========
+activation
+==========
+
+.. toctree::
+ :maxdepth: 1
+
+ activation/ELU.rst
+ activation/GELU.rst
+ activation/Hardshrink.rst
+ activation/Tanh.rst
+ activation/Hardtanh.rst
+ activation/LogSigmoid.rst
+ activation/PReLU.rst
+ activation/ReLU.rst
+ activation/ReLU6.rst
+ activation/SELU.rst
+ activation/Softmax.rst
+ activation/Softplus.rst
+ activation/Softshrink.rst
+ activation/Softsign.rst
+ activation/Tanhshrink.rst
diff --git a/doc/fluid/api/nn/activation/ELU.rst b/doc/fluid/api/nn/activation/ELU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d98897b3706e5dc2796989c68c4b86adcdf5fa31
--- /dev/null
+++ b/doc/fluid/api/nn/activation/ELU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_ELU:
+
+ELU
+-------------------------------
+
+.. autoclass:: paddle.nn.ELU
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/GELU.rst b/doc/fluid/api/nn/activation/GELU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3aa80fa5b5b45af194ec0c71e4a537a4e9cb0bec
--- /dev/null
+++ b/doc/fluid/api/nn/activation/GELU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_GELU:
+
+GELU
+-------------------------------
+
+.. autoclass:: paddle.nn.GELU
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/Hardshrink.rst b/doc/fluid/api/nn/activation/Hardshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..552e6a2a9883ed37f55544ed0f148920bd08f46a
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Hardshrink.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_activation_Hardshrink:
+
+Hardshrink
+----------
+
+.. autoclass:: paddle.nn.activation.Hardshrink
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Hardtanh.rst b/doc/fluid/api/nn/activation/Hardtanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5509d334ae5a21f5af8bbd7f53c0bce46453a120
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Hardtanh.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_Hardtanh:
+
+Hardtanh
+-------------------------------
+
+.. autoclass:: paddle.nn.Hardtanh
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/LogSigmoid.rst b/doc/fluid/api/nn/activation/LogSigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0407712d267bb29f215714ca77782ae6dce1eed9
--- /dev/null
+++ b/doc/fluid/api/nn/activation/LogSigmoid.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_LogSigmoid:
+
+LogSigmoid
+-------------------------------
+
+.. autoclass:: paddle.nn.LogSigmoid
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/PReLU.rst b/doc/fluid/api/nn/activation/PReLU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb4c2c3f0055c805e516816036de1571989c5d2c
--- /dev/null
+++ b/doc/fluid/api/nn/activation/PReLU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_PReLU:
+
+PReLU
+-------------------------------
+
+.. autoclass:: paddle.nn.PReLU
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/ReLU.rst b/doc/fluid/api/nn/activation/ReLU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..32a742dac0a9c1cda06128c2d5985448d0c7ab47
--- /dev/null
+++ b/doc/fluid/api/nn/activation/ReLU.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_ReLU:
+
+ReLU
+-------------------------------
+
+.. autoclass:: paddle.nn.ReLU
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/ReLU6.rst b/doc/fluid/api/nn/activation/ReLU6.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c0f9dfdd6dcc42eb5a0e4a39d1ad7e3026444b0
--- /dev/null
+++ b/doc/fluid/api/nn/activation/ReLU6.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_ReLU6:
+
+ReLU6
+---------
+
+.. autoclass:: paddle.nn.ReLU6
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/SELU.rst b/doc/fluid/api/nn/activation/SELU.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1817d2f7f01233390eca8bf91315ff09e068e6bd
--- /dev/null
+++ b/doc/fluid/api/nn/activation/SELU.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_SELU:
+
+SELU
+---------
+
+.. autoclass:: paddle.nn.SELU
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Softmax.rst b/doc/fluid/api/nn/activation/Softmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a39a3161092cba5a774a5bcb9817e42049d5e039
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Softmax.rst
@@ -0,0 +1,7 @@
+.. _api_nn_activation_Softmax:
+
+Softmax
+-------------------------------
+
+.. autoclass:: paddle.nn.Softmax
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/activation/Softplus.rst b/doc/fluid/api/nn/activation/Softplus.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d914222a4b6ac4996d867ac1bfa06cf6fdd4d06
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Softplus.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_Softplus:
+
+Softplus
+---------
+
+.. autoclass:: paddle.nn.Softplus
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Softshrink.rst b/doc/fluid/api/nn/activation/Softshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c332c8fe505563bda387aec3020691e5fe8525a
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Softshrink.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_Softshrink:
+
+Softshrink
+----------
+
+.. autoclass:: paddle.nn.Softshrink
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Softsign.rst b/doc/fluid/api/nn/activation/Softsign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9bedacb898e320ebd8986004330fceda2af55479
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Softsign.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_Softsign:
+
+Softsign
+---------
+
+.. autoclass:: paddle.nn.Softsign
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Tanh.rst b/doc/fluid/api/nn/activation/Tanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..80cb491331f1f6bbe8afec91c444709c196ce131
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Tanh.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_activation_Tanh:
+
+Tanh
+---------
+
+.. autoclass:: paddle.nn.layer.activation.Tanh
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/activation/Tanhshrink.rst b/doc/fluid/api/nn/activation/Tanhshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8715fa2726342bcb988a2a442240b7a2673abbc3
--- /dev/null
+++ b/doc/fluid/api/nn/activation/Tanhshrink.rst
@@ -0,0 +1,8 @@
+.. _api_nn_activation_Tanhshrink:
+
+Tanhshrink
+----------
+
+.. autoclass:: paddle.nn.Tanhshrink
+ :noindex:
+
diff --git a/doc/fluid/api/nn/adaptive_pool2d.rst b/doc/fluid/api/nn/adaptive_pool2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dae2332c76d95c9788208dfbc6ea5ee6ae14ad43
--- /dev/null
+++ b/doc/fluid/api/nn/adaptive_pool2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_adaptive_pool2d:
+
+adaptive_pool2d
+-------------------------------
+:doc_source: paddle.fluid.layers.adaptive_pool2d
+
+
diff --git a/doc/fluid/api/nn/adaptive_pool3d.rst b/doc/fluid/api/nn/adaptive_pool3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8b19cca895edd712090eeb53d220c328bd6545bd
--- /dev/null
+++ b/doc/fluid/api/nn/adaptive_pool3d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_adaptive_pool3d:
+
+adaptive_pool3d
+-------------------------------
+:doc_source: paddle.fluid.layers.adaptive_pool3d
+
+
diff --git a/doc/fluid/api/nn/add_position_encoding.rst b/doc/fluid/api/nn/add_position_encoding.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f07f2ba62e729502edd54f526d516564259bdc6
--- /dev/null
+++ b/doc/fluid/api/nn/add_position_encoding.rst
@@ -0,0 +1,7 @@
+.. _api_nn_add_position_encoding:
+
+add_position_encoding
+-------------------------------
+:doc_source: paddle.fluid.layers.add_position_encoding
+
+
diff --git a/doc/fluid/api/nn/affine_channel.rst b/doc/fluid/api/nn/affine_channel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0a54a9e083330daf815f3592889b492e568da34f
--- /dev/null
+++ b/doc/fluid/api/nn/affine_channel.rst
@@ -0,0 +1,7 @@
+.. _api_nn_affine_channel:
+
+affine_channel
+-------------------------------
+:doc_source: paddle.fluid.layers.affine_channel
+
+
diff --git a/doc/fluid/api/nn/affine_grid.rst b/doc/fluid/api/nn/affine_grid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cc7cea269e63d05dd24cd3089c473b580211b904
--- /dev/null
+++ b/doc/fluid/api/nn/affine_grid.rst
@@ -0,0 +1,7 @@
+.. _api_nn_affine_grid:
+
+affine_grid
+-------------------------------
+:doc_source: paddle.fluid.layers.affine_grid
+
+
diff --git a/doc/fluid/api/nn/anchor_generator.rst b/doc/fluid/api/nn/anchor_generator.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7bec2a4776765c888dd8ecdaab221c61308fd894
--- /dev/null
+++ b/doc/fluid/api/nn/anchor_generator.rst
@@ -0,0 +1,7 @@
+.. _api_nn_anchor_generator:
+
+anchor_generator
+-------------------------------
+:doc_source: paddle.fluid.layers.anchor_generator
+
+
diff --git a/doc/fluid/api/nn/assign.rst b/doc/fluid/api/nn/assign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..946b6b807def0e8b49f7d903c52be0badba09235
--- /dev/null
+++ b/doc/fluid/api/nn/assign.rst
@@ -0,0 +1,7 @@
+.. _api_nn_assign:
+
+assign
+-------------------------------
+:doc_source: paddle.fluid.layers.assign
+
+
diff --git a/doc/fluid/api/nn/beam_search.rst b/doc/fluid/api/nn/beam_search.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67db0c4dba18512e4b315de41637185f55210240
--- /dev/null
+++ b/doc/fluid/api/nn/beam_search.rst
@@ -0,0 +1,7 @@
+.. _api_nn_beam_search:
+
+beam_search
+-------------------------------
+:doc_source: paddle.fluid.layers.beam_search
+
+
diff --git a/doc/fluid/api/nn/beam_search_decode.rst b/doc/fluid/api/nn/beam_search_decode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc1cb52203bc8c7f57127f3e12bbdb9d481810fa
--- /dev/null
+++ b/doc/fluid/api/nn/beam_search_decode.rst
@@ -0,0 +1,7 @@
+.. _api_nn_beam_search_decode:
+
+beam_search_decode
+-------------------------------
+:doc_source: paddle.fluid.layers.beam_search_decode
+
+
diff --git a/doc/fluid/api/nn/bipartite_match.rst b/doc/fluid/api/nn/bipartite_match.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cf48e0589418f80c0ea25cbd68808464697d8572
--- /dev/null
+++ b/doc/fluid/api/nn/bipartite_match.rst
@@ -0,0 +1,7 @@
+.. _api_nn_bipartite_match:
+
+bipartite_match
+-------------------------------
+:doc_source: paddle.fluid.layers.bipartite_match
+
+
diff --git a/doc/fluid/api/nn/box_clip.rst b/doc/fluid/api/nn/box_clip.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23a86b82bfb03fba37f29f16daf96a118b780a79
--- /dev/null
+++ b/doc/fluid/api/nn/box_clip.rst
@@ -0,0 +1,7 @@
+.. _api_nn_box_clip:
+
+box_clip
+-------------------------------
+:doc_source: paddle.fluid.layers.box_clip
+
+
diff --git a/doc/fluid/api/nn/box_coder.rst b/doc/fluid/api/nn/box_coder.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8500acde7cbb63947cfad7d8ef01c3c6a75852b7
--- /dev/null
+++ b/doc/fluid/api/nn/box_coder.rst
@@ -0,0 +1,7 @@
+.. _api_nn_box_coder:
+
+box_coder
+-------------------------------
+:doc_source: paddle.fluid.layers.box_coder
+
+
diff --git a/doc/fluid/api/nn/box_decoder_and_assign.rst b/doc/fluid/api/nn/box_decoder_and_assign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66dcc3223c03f3b7bc858e4228935e703fa2caeb
--- /dev/null
+++ b/doc/fluid/api/nn/box_decoder_and_assign.rst
@@ -0,0 +1,7 @@
+.. _api_nn_box_decoder_and_assign:
+
+box_decoder_and_assign
+-------------------------------
+:doc_source: paddle.fluid.layers.box_decoder_and_assign
+
+
diff --git a/doc/fluid/api/nn/bpr_loss.rst b/doc/fluid/api/nn/bpr_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d47ce597af9e8be264ebdc8dfb099df45df54f5
--- /dev/null
+++ b/doc/fluid/api/nn/bpr_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_bpr_loss:
+
+bpr_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.bpr_loss
+
+
diff --git a/doc/fluid/api/nn/brelu.rst b/doc/fluid/api/nn/brelu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..093d2cb408cfebffb13f3f2f1efb3a589e7e23ba
--- /dev/null
+++ b/doc/fluid/api/nn/brelu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_brelu:
+
+brelu
+-------------------------------
+:doc_source: paddle.fluid.layers.brelu
+
+
diff --git a/doc/fluid/api/nn/case.rst b/doc/fluid/api/nn/case.rst
new file mode 100644
index 0000000000000000000000000000000000000000..51e8a02b68bba61f7e79f655f75e478fbcaff792
--- /dev/null
+++ b/doc/fluid/api/nn/case.rst
@@ -0,0 +1,7 @@
+.. _api_nn_case:
+
+case
+-------------------------------
+:doc_source: paddle.fluid.layers.case
+
+
diff --git a/doc/fluid/api/nn/center_loss.rst b/doc/fluid/api/nn/center_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..315cadcfa825ee984321f394725e05c515b7a5f9
--- /dev/null
+++ b/doc/fluid/api/nn/center_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_center_loss:
+
+center_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.center_loss
+
+
diff --git a/doc/fluid/api/nn/clip.rst b/doc/fluid/api/nn/clip.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c4f53ffd8ef9f7a72277004f485a036f20ee9893
--- /dev/null
+++ b/doc/fluid/api/nn/clip.rst
@@ -0,0 +1,7 @@
+.. _api_nn_clip:
+
+clip
+-------------------------------
+:doc_source: paddle.fluid.layers.clip
+
+
diff --git a/doc/fluid/api/nn/clip_by_norm.rst b/doc/fluid/api/nn/clip_by_norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b6ea08ae79320cc30b05c96df0beb7a089fd487d
--- /dev/null
+++ b/doc/fluid/api/nn/clip_by_norm.rst
@@ -0,0 +1,7 @@
+.. _api_nn_clip_by_norm:
+
+clip_by_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.clip_by_norm
+
+
diff --git a/doc/fluid/api/nn/collect_fpn_proposals.rst b/doc/fluid/api/nn/collect_fpn_proposals.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ed338114695b413c346a613d8da75e6f126a0ed4
--- /dev/null
+++ b/doc/fluid/api/nn/collect_fpn_proposals.rst
@@ -0,0 +1,7 @@
+.. _api_nn_collect_fpn_proposals:
+
+collect_fpn_proposals
+-------------------------------
+:doc_source: paddle.fluid.layers.collect_fpn_proposals
+
+
diff --git a/doc/fluid/api/nn/cond.rst b/doc/fluid/api/nn/cond.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3abd98e61cee38074152cedf6b14ae4ab6462943
--- /dev/null
+++ b/doc/fluid/api/nn/cond.rst
@@ -0,0 +1,7 @@
+.. _api_nn_cond:
+
+cond
+-------------------------------
+:doc_source: paddle.fluid.layers.cond
+
+
diff --git a/doc/fluid/api/nn/continuous_value_model.rst b/doc/fluid/api/nn/continuous_value_model.rst
new file mode 100644
index 0000000000000000000000000000000000000000..95e3694575659092ae0864b42ca21e19b39c15b3
--- /dev/null
+++ b/doc/fluid/api/nn/continuous_value_model.rst
@@ -0,0 +1,7 @@
+.. _api_nn_continuous_value_model:
+
+continuous_value_model
+-------------------------------
+:doc_source: paddle.fluid.layers.continuous_value_model
+
+
diff --git a/doc/fluid/api/nn/cosine_decay.rst b/doc/fluid/api/nn/cosine_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0592799d43c6d883085c0fa7c67ffe29446e4253
--- /dev/null
+++ b/doc/fluid/api/nn/cosine_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_cosine_decay:
+
+cosine_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.cosine_decay
+
+
diff --git a/doc/fluid/api/nn/cosine_similarity.rst b/doc/fluid/api/nn/cosine_similarity.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d568494ff0536262c65a4ef3f8e80aad168d1819
--- /dev/null
+++ b/doc/fluid/api/nn/cosine_similarity.rst
@@ -0,0 +1,7 @@
+.. _api_nn_cosine_similarity:
+
+cosine_similarity
+-------------------------------
+:doc_source: paddle.nn.functional.cosine_similarity
+
+
diff --git a/doc/fluid/api/nn/cross_entropy.rst b/doc/fluid/api/nn/cross_entropy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..68aeb05528fa22bd986b586a77a3d11aed324eac
--- /dev/null
+++ b/doc/fluid/api/nn/cross_entropy.rst
@@ -0,0 +1,7 @@
+.. _api_nn_cross_entropy:
+
+cross_entropy
+-------------------------------
+:doc_source: paddle.fluid.layers.cross_entropy
+
+
diff --git a/doc/fluid/api/nn/data.rst b/doc/fluid/api/nn/data.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b4cb9b1d0037da6e6cf65b00995f01ab11e29306
--- /dev/null
+++ b/doc/fluid/api/nn/data.rst
@@ -0,0 +1,7 @@
+.. _api_nn_data:
+
+data
+-------------------------------
+:doc_source: paddle.fluid.data
+
+
diff --git a/doc/fluid/api/nn/deformable_roi_pooling.rst b/doc/fluid/api/nn/deformable_roi_pooling.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eb675c94570eb5bb0ad5c58e63c4769e68c6bdcc
--- /dev/null
+++ b/doc/fluid/api/nn/deformable_roi_pooling.rst
@@ -0,0 +1,7 @@
+.. _api_nn_deformable_roi_pooling:
+
+deformable_roi_pooling
+-------------------------------
+:doc_source: paddle.fluid.layers.deformable_roi_pooling
+
+
diff --git a/doc/fluid/api/nn/density_prior_box.rst b/doc/fluid/api/nn/density_prior_box.rst
new file mode 100644
index 0000000000000000000000000000000000000000..008db64af8dd4f0a56c34a7c0ba8ccd08d15e546
--- /dev/null
+++ b/doc/fluid/api/nn/density_prior_box.rst
@@ -0,0 +1,7 @@
+.. _api_nn_density_prior_box:
+
+density_prior_box
+-------------------------------
+:doc_source: paddle.fluid.layers.density_prior_box
+
+
diff --git a/doc/fluid/api/nn/detection_output.rst b/doc/fluid/api/nn/detection_output.rst
new file mode 100644
index 0000000000000000000000000000000000000000..478a451d6b70668dcc03ebaea8ed86150d0a95ad
--- /dev/null
+++ b/doc/fluid/api/nn/detection_output.rst
@@ -0,0 +1,7 @@
+.. _api_nn_detection_output:
+
+detection_output
+-------------------------------
+:doc_source: paddle.fluid.layers.detection_output
+
+
diff --git a/doc/fluid/api/nn/dice_loss.rst b/doc/fluid/api/nn/dice_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f7ac57ebea346f99abfd659b4028aba1d53db2a6
--- /dev/null
+++ b/doc/fluid/api/nn/dice_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_dice_loss:
+
+dice_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.dice_loss
+
+
diff --git a/doc/fluid/api/nn/distribute_fpn_proposals.rst b/doc/fluid/api/nn/distribute_fpn_proposals.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a40b2e71db3acccb899731a30fd8cbd32c71332
--- /dev/null
+++ b/doc/fluid/api/nn/distribute_fpn_proposals.rst
@@ -0,0 +1,7 @@
+.. _api_nn_distribute_fpn_proposals:
+
+distribute_fpn_proposals
+-------------------------------
+:doc_source: paddle.fluid.layers.distribute_fpn_proposals
+
+
diff --git a/doc/fluid/api/nn/dropout.rst b/doc/fluid/api/nn/dropout.rst
new file mode 100644
index 0000000000000000000000000000000000000000..34277a1696816f65838ef17c576b864d74a43429
--- /dev/null
+++ b/doc/fluid/api/nn/dropout.rst
@@ -0,0 +1,7 @@
+.. _api_nn_dropout:
+
+dropout
+-------------------------------
+:doc_source: paddle.fluid.layers.dropout
+
+
diff --git a/doc/fluid/api/nn/edit_distance.rst b/doc/fluid/api/nn/edit_distance.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8673f388a89c501b1df15e17c4bfb0767d884d93
--- /dev/null
+++ b/doc/fluid/api/nn/edit_distance.rst
@@ -0,0 +1,7 @@
+.. _api_nn_edit_distance:
+
+edit_distance
+-------------------------------
+:doc_source: paddle.fluid.layers.edit_distance
+
+
diff --git a/doc/fluid/api/nn/elu.rst b/doc/fluid/api/nn/elu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb526089915c31f2761ba0be40087af8233c3632
--- /dev/null
+++ b/doc/fluid/api/nn/elu.rst
@@ -0,0 +1,9 @@
+.. _api_nn_elu:
+
+elu
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.elu
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/erf.rst b/doc/fluid/api/nn/erf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ed8205a211d0ff276ef1145b9985049b90e452ab
--- /dev/null
+++ b/doc/fluid/api/nn/erf.rst
@@ -0,0 +1,7 @@
+.. _api_nn_erf:
+
+erf
+-------------------------------
+:doc_source: paddle.fluid.layers.erf
+
+
diff --git a/doc/fluid/api/nn/exponential_decay.rst b/doc/fluid/api/nn/exponential_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c27917b73e7be52a1bd1edec900343774333a216
--- /dev/null
+++ b/doc/fluid/api/nn/exponential_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_exponential_decay:
+
+exponential_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.exponential_decay
+
+
diff --git a/doc/fluid/api/nn/filter_by_instag.rst b/doc/fluid/api/nn/filter_by_instag.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3587ac608f0d321d83dca11de89966f628c9eed0
--- /dev/null
+++ b/doc/fluid/api/nn/filter_by_instag.rst
@@ -0,0 +1,7 @@
+.. _api_nn_filter_by_instag:
+
+filter_by_instag
+-------------------------------
+:doc_source: paddle.fluid.layers.filter_by_instag
+
+
diff --git a/doc/fluid/api/nn/fsp_matrix.rst b/doc/fluid/api/nn/fsp_matrix.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e44a51d85c2697948ac64a744c31d99dbf913e4a
--- /dev/null
+++ b/doc/fluid/api/nn/fsp_matrix.rst
@@ -0,0 +1,7 @@
+.. _api_nn_fsp_matrix:
+
+fsp_matrix
+-------------------------------
+:doc_source: paddle.fluid.layers.fsp_matrix
+
+
diff --git a/doc/fluid/api/nn/functional.rst b/doc/fluid/api/nn/functional.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1c8c2d3fcf0c3f4a76a82b50a7157ad66966be81
--- /dev/null
+++ b/doc/fluid/api/nn/functional.rst
@@ -0,0 +1,21 @@
+==========
+functional
+==========
+
+.. toctree::
+ :maxdepth: 1
+
+ functional/binary_cross_entropy_with_logits.rst
+ functional/l1_loss.rst
+ functional/nll_loss.rst
+ functional/mse_loss.rst
+ functional/kl_div.rst
+ functional/one_hot.rst
+ functional/ctc_loss.rst
+ functional/adaptive_avg_pool2d.rst
+ functional/adaptive_avg_pool3d.rst
+ functional/conv2d.rst
+ functional/conv3d.rst
+ functional/conv_transpose2d.rst
+ functional/conv_transpose3d.rst
+ functional/bilinear.rst
diff --git a/doc/fluid/api/nn/functional/activation/sigmoid.rst b/doc/fluid/api/nn/functional/activation/sigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3ed969b9d6ff4102aeaea815ed5746a11d76a57d
--- /dev/null
+++ b/doc/fluid/api/nn/functional/activation/sigmoid.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_activation_sigmoid:
+
+sigmoid
+-------
+
+.. autofunction:: paddle.nn.functional.activation.sigmoid
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst b/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d0eff20c18b8ea6d58cfe0405c16aee6f721d30f
--- /dev/null
+++ b/doc/fluid/api/nn/functional/adaptive_avg_pool2d.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_adaptive_avg_pool2d:
+
+adaptive_avg_pool2d
+--------------------
+
+.. autofunction:: paddle.nn.functional.adaptive_avg_pool2d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst b/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4765c3e11179f685033023fa8e7cd50846367d55
--- /dev/null
+++ b/doc/fluid/api/nn/functional/adaptive_avg_pool3d.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_adaptive_avg_pool3d:
+
+adaptive_avg_pool3d
+--------------------
+
+.. autofunction:: paddle.nn.functional.adaptive_avg_pool3d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/bilinear.rst b/doc/fluid/api/nn/functional/bilinear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0b15e7c385a851236db50ef84e62bec3bbb59312
--- /dev/null
+++ b/doc/fluid/api/nn/functional/bilinear.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_bilinear:
+
+bilinear
+--------------------
+
+.. autofunction:: paddle.nn.functional.bilinear
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/binary_cross_entropy.rst b/doc/fluid/api/nn/functional/binary_cross_entropy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..adafad14aac0b6523c147a0b6803b208004a4fdc
--- /dev/null
+++ b/doc/fluid/api/nn/functional/binary_cross_entropy.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_binary_cross_entropy:
+
+binary_cross_entropy
+--------------------
+
+.. autofunction:: paddle.nn.functional.binary_cross_entropy
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst b/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4ee93c942d3a6c7bb123de1e05e96d3a486c157c
--- /dev/null
+++ b/doc/fluid/api/nn/functional/binary_cross_entropy_with_logits.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_binary_cross_entropy_with_logits:
+
+binary_cross_entropy_with_logits
+--------------------------------
+
+.. autofunction:: paddle.nn.functional.binary_cross_entropy_with_logits
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/conv2d.rst b/doc/fluid/api/nn/functional/conv2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bdf3753b941f5a6678575786eaeed8f8e4238d39
--- /dev/null
+++ b/doc/fluid/api/nn/functional/conv2d.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_conv_conv2d:
+
+conv2d
+------
+
+.. autofunction:: paddle.nn.functional.conv.conv2d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/conv3d.rst b/doc/fluid/api/nn/functional/conv3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..11588b19ac8b64b90b10a50faa61aefe74e6203c
--- /dev/null
+++ b/doc/fluid/api/nn/functional/conv3d.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_conv_conv3d:
+
+conv3d
+------
+
+.. autofunction:: paddle.nn.functional.conv.conv3d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/conv_transpose2d.rst b/doc/fluid/api/nn/functional/conv_transpose2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..da09490ed2f02740c80f64387f4f18dab4916825
--- /dev/null
+++ b/doc/fluid/api/nn/functional/conv_transpose2d.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_conv_conv_transpose2d:
+
+conv_transpose2d
+----------------
+
+.. autofunction:: paddle.nn.functional.conv.conv_transpose2d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/conv_transpose3d.rst b/doc/fluid/api/nn/functional/conv_transpose3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..722a2d8b8ee481e43ab16bb1e69d46534c49f7d4
--- /dev/null
+++ b/doc/fluid/api/nn/functional/conv_transpose3d.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_conv_conv_transpose3d:
+
+conv_transpose3d
+----------------
+
+.. autofunction:: paddle.nn.functional.conv.conv_transpose3d
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/ctc_loss.rst b/doc/fluid/api/nn/functional/ctc_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c77513c956eb592fc3b1335f08d75b37b0080bff
--- /dev/null
+++ b/doc/fluid/api/nn/functional/ctc_loss.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_ctc_loss:
+
+ctc_loss
+--------
+
+.. autofunction:: paddle.nn.functional.loss.ctc_loss
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/kl_div.rst b/doc/fluid/api/nn/functional/kl_div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cbd2e10aa2381a61bb03f7b94d99a86ac53fcd23
--- /dev/null
+++ b/doc/fluid/api/nn/functional/kl_div.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_kl_div:
+
+kl_div
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.kl_div
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/l1_loss.rst b/doc/fluid/api/nn/functional/l1_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01a3ea06e7d034eb70744146816e6d0a166b749d
--- /dev/null
+++ b/doc/fluid/api/nn/functional/l1_loss.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_l1_loss:
+
+l1_loss
+-------
+
+.. autofunction:: paddle.nn.functional.l1_loss
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst b/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e92eadc126a49d8a46bcfc06960eb39dcdc35fec
--- /dev/null
+++ b/doc/fluid/api/nn/functional/loss/margin_ranking_loss.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_functional_loss_margin_ranking_loss:
+
+margin_ranking_loss
+-------------------
+
+.. autofunction:: paddle.nn.functional.loss.margin_ranking_loss
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/mse_loss.rst b/doc/fluid/api/nn/functional/mse_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b5ec8c58b5a10c206e85184f36e414396fc7d9b9
--- /dev/null
+++ b/doc/fluid/api/nn/functional/mse_loss.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_mse_loss:
+
+mse_loss
+--------
+
+.. autofunction:: paddle.nn.functional.mse_loss
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/nll_loss.rst b/doc/fluid/api/nn/functional/nll_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6f0ce4093ac8a9cefc202e4346457edd4b2c6ae1
--- /dev/null
+++ b/doc/fluid/api/nn/functional/nll_loss.rst
@@ -0,0 +1,8 @@
+.. _api_nn_functional_nll_loss:
+
+nll_loss
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.nll_loss
+ :noindex:
+
diff --git a/doc/fluid/api/nn/functional/one_hot.rst b/doc/fluid/api/nn/functional/one_hot.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a8e5272e9a38a3c61a7803ca04dc90803ff147df
--- /dev/null
+++ b/doc/fluid/api/nn/functional/one_hot.rst
@@ -0,0 +1,7 @@
+.. _api_nn_functional_one_hot:
+
+one_hot
+---------
+
+.. autofunction:: paddle.nn.functional.one_hot
+ :noindex:
diff --git a/doc/fluid/api/nn/gather_tree.rst b/doc/fluid/api/nn/gather_tree.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7901d38d5ae27a8ec0c76a415646d34a3b844105
--- /dev/null
+++ b/doc/fluid/api/nn/gather_tree.rst
@@ -0,0 +1,7 @@
+.. _api_nn_gather_tree:
+
+gather_tree
+-------------------------------
+:doc_source: paddle.fluid.layers.gather_tree
+
+
diff --git a/doc/fluid/api/nn/gelu.rst b/doc/fluid/api/nn/gelu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f4b5d57f8a45154f80cd3ba8754b3b676c9ddb73
--- /dev/null
+++ b/doc/fluid/api/nn/gelu.rst
@@ -0,0 +1,9 @@
+.. _api_nn_gelu:
+
+gelu
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.gelu
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/generate_mask_labels.rst b/doc/fluid/api/nn/generate_mask_labels.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01b99e4bddf75ab7a9b8beb32284e6b9f1a16d48
--- /dev/null
+++ b/doc/fluid/api/nn/generate_mask_labels.rst
@@ -0,0 +1,7 @@
+.. _api_nn_generate_mask_labels:
+
+generate_mask_labels
+-------------------------------
+:doc_source: paddle.fluid.layers.generate_mask_labels
+
+
diff --git a/doc/fluid/api/nn/generate_proposal_labels.rst b/doc/fluid/api/nn/generate_proposal_labels.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75b0d264dd57cce5be3d6b2d72e571aefa21d8bd
--- /dev/null
+++ b/doc/fluid/api/nn/generate_proposal_labels.rst
@@ -0,0 +1,7 @@
+.. _api_nn_generate_proposal_labels:
+
+generate_proposal_labels
+-------------------------------
+:doc_source: paddle.fluid.layers.generate_proposal_labels
+
+
diff --git a/doc/fluid/api/nn/generate_proposals.rst b/doc/fluid/api/nn/generate_proposals.rst
new file mode 100644
index 0000000000000000000000000000000000000000..47057daa5385689a2344132a8931886c5c80f0a8
--- /dev/null
+++ b/doc/fluid/api/nn/generate_proposals.rst
@@ -0,0 +1,7 @@
+.. _api_nn_generate_proposals:
+
+generate_proposals
+-------------------------------
+:doc_source: paddle.fluid.layers.generate_proposals
+
+
diff --git a/doc/fluid/api/nn/grid_sampler.rst b/doc/fluid/api/nn/grid_sampler.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a16575cc78a5cd88abfe2ea0b700d9a4c63f42a4
--- /dev/null
+++ b/doc/fluid/api/nn/grid_sampler.rst
@@ -0,0 +1,7 @@
+.. _api_nn_grid_sampler:
+
+grid_sampler
+-------------------------------
+:doc_source: paddle.fluid.layers.grid_sampler
+
+
diff --git a/doc/fluid/api/nn/hard_sigmoid.rst b/doc/fluid/api/nn/hard_sigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2570f0da1aab233cf7e80def8ebdcacc0e6a6e4e
--- /dev/null
+++ b/doc/fluid/api/nn/hard_sigmoid.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hard_sigmoid:
+
+hard_sigmoid
+-------------------------------
+:doc_source: paddle.fluid.layers.hard_sigmoid
+
+
diff --git a/doc/fluid/api/nn/hard_swish.rst b/doc/fluid/api/nn/hard_swish.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8a44bfb974ea366d8541117f838ddc9dbf76a514
--- /dev/null
+++ b/doc/fluid/api/nn/hard_swish.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hard_swish:
+
+hard_swish
+-------------------------------
+:doc_source: paddle.fluid.layers.hard_swish
+
+
diff --git a/doc/fluid/api/nn/hardshrink.rst b/doc/fluid/api/nn/hardshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..48b98f2a5366941aa80c5dcd6b64b5a089378860
--- /dev/null
+++ b/doc/fluid/api/nn/hardshrink.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_hardshrink:
+
+hardshrink
+----------
+
+.. autofunction:: paddle.nn.functional.hardshrink
+ :noindex:
+
diff --git a/doc/fluid/api/nn/hardtanh.rst b/doc/fluid/api/nn/hardtanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5c796a6638119b9cf0aaf0271b785cb8a54e27d
--- /dev/null
+++ b/doc/fluid/api/nn/hardtanh.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hardtanh:
+
+hardtanh
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.hardtanh
+ :noindex:
diff --git a/doc/fluid/api/nn/hash.rst b/doc/fluid/api/nn/hash.rst
new file mode 100644
index 0000000000000000000000000000000000000000..94c594c22db97978534bb5285ad6ce122384dcf7
--- /dev/null
+++ b/doc/fluid/api/nn/hash.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hash:
+
+hash
+-------------------------------
+:doc_source: paddle.fluid.layers.hash
+
+
diff --git a/doc/fluid/api/nn/hsigmoid.rst b/doc/fluid/api/nn/hsigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8e6e391581f3755b7b319dae486f05d173483bf4
--- /dev/null
+++ b/doc/fluid/api/nn/hsigmoid.rst
@@ -0,0 +1,7 @@
+.. _api_nn_hsigmoid:
+
+hsigmoid
+-------------------------------
+:doc_source: paddle.fluid.layers.hsigmoid
+
+
diff --git a/doc/fluid/api/nn/huber_loss.rst b/doc/fluid/api/nn/huber_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5403abfbaedaff8c032b5e0734fe071368006f95
--- /dev/null
+++ b/doc/fluid/api/nn/huber_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_huber_loss:
+
+huber_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.huber_loss
+
+
diff --git a/doc/fluid/api/nn/image_resize.rst b/doc/fluid/api/nn/image_resize.rst
new file mode 100644
index 0000000000000000000000000000000000000000..724df43ce156a5632c584e8103aed56b1d53937d
--- /dev/null
+++ b/doc/fluid/api/nn/image_resize.rst
@@ -0,0 +1,7 @@
+.. _api_nn_image_resize:
+
+image_resize
+-------------------------------
+:doc_source: paddle.fluid.layers.image_resize
+
+
diff --git a/doc/fluid/api/nn/image_resize_short.rst b/doc/fluid/api/nn/image_resize_short.rst
new file mode 100644
index 0000000000000000000000000000000000000000..13b242973c90acac6db414d60a16e3fe74ec479a
--- /dev/null
+++ b/doc/fluid/api/nn/image_resize_short.rst
@@ -0,0 +1,7 @@
+.. _api_nn_image_resize_short:
+
+image_resize_short
+-------------------------------
+:doc_source: paddle.fluid.layers.image_resize_short
+
+
diff --git a/doc/fluid/api/nn/initializer.rst b/doc/fluid/api/nn/initializer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c373707888da5732c1eff5786f2a27347f248b4a
--- /dev/null
+++ b/doc/fluid/api/nn/initializer.rst
@@ -0,0 +1,15 @@
+=======================
+paddle.nn.initializer
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ initializer/Bilinear.rst
+
+ initializer/Constant.rst
+ initializer/MSRA.rst
+ initializer/Normal.rst
+ initializer/TruncatedNormal.rst
+ initializer/Uniform.rst
+ initializer/Xavier.rst
diff --git a/doc/fluid/api/nn/initializer/Bilinear.rst b/doc/fluid/api/nn/initializer/Bilinear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dcbcde63d32526d19ad7924825d804b75d0559a6
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/Bilinear.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_Bilinear:
+
+Bilinear
+-------------------------------
+:doc_source: paddle.fluid.initializer.Bilinear
+
+
diff --git a/doc/fluid/api/nn/initializer/Constant.rst b/doc/fluid/api/nn/initializer/Constant.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9a1515be6ebd1b0f7dbbd997aa8f1fd42e556238
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/Constant.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_Constant:
+
+Constant
+-------------------------------
+:doc_source: paddle.fluid.initializer.Constant
+
+
diff --git a/doc/fluid/api/nn/initializer/MSRA.rst b/doc/fluid/api/nn/initializer/MSRA.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6fd0ba7e775afa83d8ee90408c3b35760e74dcfc
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/MSRA.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_MSRA:
+
+MSRA
+-------------------------------
+:doc_source: paddle.fluid.initializer.MSRA
+
+
diff --git a/doc/fluid/api/nn/initializer/Normal.rst b/doc/fluid/api/nn/initializer/Normal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..da03e41daeb80ef44a36f4e78898d4f6b2896fe1
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/Normal.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_Normal:
+
+Normal
+-------------------------------
+:doc_source: paddle.fluid.initializer.Normal
+
+
diff --git a/doc/fluid/api/nn/initializer/TruncatedNormal.rst b/doc/fluid/api/nn/initializer/TruncatedNormal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d1b8de9e61d9deb283d9e5c17512fbb4ae9667c9
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/TruncatedNormal.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_TruncatedNormal:
+
+TruncatedNormal
+-------------------------------
+:doc_source: paddle.fluid.initializer.TruncatedNormal
+
+
diff --git a/doc/fluid/api/nn/initializer/Uniform.rst b/doc/fluid/api/nn/initializer/Uniform.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc7ceb0add6adb2b702f71382ba1aa97b9f16257
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/Uniform.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_Uniform:
+
+Uniform
+-------------------------------
+:doc_source: paddle.fluid.initializer.Uniform
+
+
diff --git a/doc/fluid/api/nn/initializer/Xavier.rst b/doc/fluid/api/nn/initializer/Xavier.rst
new file mode 100644
index 0000000000000000000000000000000000000000..03d722686e8dd611531083dd2c3d1d9da6b6d4f1
--- /dev/null
+++ b/doc/fluid/api/nn/initializer/Xavier.rst
@@ -0,0 +1,7 @@
+.. _api_nn_initializer_Xavier:
+
+Xavier
+-------------------------------
+:doc_source: paddle.fluid.initializer.Xavier
+
+
diff --git a/doc/fluid/api/nn/inverse_time_decay.rst b/doc/fluid/api/nn/inverse_time_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0f238f53c7595ede6897587a0cf0d077ea3d98d2
--- /dev/null
+++ b/doc/fluid/api/nn/inverse_time_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_inverse_time_decay:
+
+inverse_time_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.inverse_time_decay
+
+
diff --git a/doc/fluid/api/nn/iou_similarity.rst b/doc/fluid/api/nn/iou_similarity.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e81a38b061a461e73c3966b3118d6507802b37d2
--- /dev/null
+++ b/doc/fluid/api/nn/iou_similarity.rst
@@ -0,0 +1,7 @@
+.. _api_nn_iou_similarity:
+
+iou_similarity
+-------------------------------
+:doc_source: paddle.fluid.layers.iou_similarity
+
+
diff --git a/doc/fluid/api/nn/kldiv_loss.rst b/doc/fluid/api/nn/kldiv_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..185dbbd71a4d37a00c040cdbc75d758941607cf5
--- /dev/null
+++ b/doc/fluid/api/nn/kldiv_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_kldiv_loss:
+
+kldiv_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.kldiv_loss
+
+
diff --git a/doc/fluid/api/nn/l2_normalize.rst b/doc/fluid/api/nn/l2_normalize.rst
new file mode 100644
index 0000000000000000000000000000000000000000..41c75aa8ca1545956b3e95322b66ad00cc06a914
--- /dev/null
+++ b/doc/fluid/api/nn/l2_normalize.rst
@@ -0,0 +1,7 @@
+.. _api_nn_l2_normalize:
+
+l2_normalize
+-------------------------------
+:doc_source: paddle.fluid.layers.l2_normalize
+
+
diff --git a/doc/fluid/api/nn/label_smooth.rst b/doc/fluid/api/nn/label_smooth.rst
new file mode 100644
index 0000000000000000000000000000000000000000..119775ec66dd09666490c2d3d45ee12bfec569dc
--- /dev/null
+++ b/doc/fluid/api/nn/label_smooth.rst
@@ -0,0 +1,7 @@
+.. _api_nn_label_smooth:
+
+label_smooth
+-------------------------------
+:doc_source: paddle.fluid.layers.label_smooth
+
+
diff --git a/doc/fluid/api/nn/layer/activation/Sigmoid.rst b/doc/fluid/api/nn/layer/activation/Sigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..46a0c2d263eaf10cbc81a342135ace1b58b2e2f4
--- /dev/null
+++ b/doc/fluid/api/nn/layer/activation/Sigmoid.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_activation_Sigmoid:
+
+Sigmoid
+-------
+
+.. autoclass:: paddle.nn.layer.activation.Sigmoid
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst b/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d69d1deff5defab24b2f12ea877c3a208a801478
--- /dev/null
+++ b/doc/fluid/api/nn/layer/loss/MarginRankingLoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_layer_loss_MarginRankingLoss:
+
+MarginRankingLoss
+-----------------
+
+.. autoclass:: paddle.nn.layer.loss.MarginRankingLoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/leaky_relu.rst b/doc/fluid/api/nn/leaky_relu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cf38cdb92c3d432c66f95aaed46449c52677287f
--- /dev/null
+++ b/doc/fluid/api/nn/leaky_relu.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_leaky_relu:
+
+leaky_relu
+----------
+
+.. autofunction:: paddle.nn.functional.leaky_relu
+ :noindex:
+
diff --git a/doc/fluid/api/nn/linear_lr_warmup.rst b/doc/fluid/api/nn/linear_lr_warmup.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e778f3061778ac4ae3d4fd10510a9ad0142294f7
--- /dev/null
+++ b/doc/fluid/api/nn/linear_lr_warmup.rst
@@ -0,0 +1,7 @@
+.. _api_nn_linear_lr_warmup:
+
+linear_lr_warmup
+-------------------------------
+:doc_source: paddle.fluid.layers.linear_lr_warmup
+
+
diff --git a/doc/fluid/api/nn/log_loss.rst b/doc/fluid/api/nn/log_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f57ea2cfa0cb472ed3a19eb377f1d8d5c0a1c752
--- /dev/null
+++ b/doc/fluid/api/nn/log_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_log_loss:
+
+log_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.log_loss
+
+
diff --git a/doc/fluid/api/nn/log_sigmoid.rst b/doc/fluid/api/nn/log_sigmoid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c8df00e2f286bf8bf4864b8f708e8d1fe5453034
--- /dev/null
+++ b/doc/fluid/api/nn/log_sigmoid.rst
@@ -0,0 +1,9 @@
+.. _api_nn_log_sigmoid:
+
+log_sigmoid
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.log_sigmoid
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/log_softmax.rst b/doc/fluid/api/nn/log_softmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..88e8b52219798fb016f567414ac88157e4e107b6
--- /dev/null
+++ b/doc/fluid/api/nn/log_softmax.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_log_softmax:
+
+log_softmax
+-----------
+
+.. autofunction:: paddle.nn.functional.log_softmax
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/loss.rst b/doc/fluid/api/nn/loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c9c351b78183ad0fea22a5df9505176a81199b37
--- /dev/null
+++ b/doc/fluid/api/nn/loss.rst
@@ -0,0 +1,16 @@
+====
+loss
+====
+
+.. toctree::
+ :maxdepth: 1
+
+ loss/BCELoss.rst
+ loss/BCEWithLogitsLoss.rst
+ loss/CrossEntropyLoss.rst
+ loss/L1Loss.rst
+ loss/MSELoss.rst
+ loss/NLLLoss.rst
+ loss/KLDivLoss.rst
+ loss/SmoothL1Loss.rst
+ loss/CTCLoss.rst
diff --git a/doc/fluid/api/nn/loss/BCELoss.rst b/doc/fluid/api/nn/loss/BCELoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2dd78661c46835024ad4826c6108a3115e329e45
--- /dev/null
+++ b/doc/fluid/api/nn/loss/BCELoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_BCELoss:
+
+BCELoss
+-------------------------------
+
+.. autoclass:: paddle.nn.loss.BCELoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst b/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..03bc6f4e4309d49152cec2b264a9ec9ebad5b373
--- /dev/null
+++ b/doc/fluid/api/nn/loss/BCEWithLogitsLoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_BCEWithLogitsLoss:
+
+BCEWithLogitsLoss
+-------------------------------
+
+.. autoclass:: paddle.nn.loss.BCEWithLogitsLoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/CTCLoss.rst b/doc/fluid/api/nn/loss/CTCLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..13ce8ac53cac0808338ca95ff152c491b79706a2
--- /dev/null
+++ b/doc/fluid/api/nn/loss/CTCLoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_CTCLoss:
+
+CTCLoss
+-------
+
+.. autoclass:: paddle.nn.loss.CTCLoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/KLDivLoss.rst b/doc/fluid/api/nn/loss/KLDivLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..797e815e29bfa1e42acb446461a8df499c961469
--- /dev/null
+++ b/doc/fluid/api/nn/loss/KLDivLoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_KLDivLoss:
+
+KLDivLoss
+-------------------------------
+
+.. autoclass:: paddle.nn.loss.KLDivLoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/L1Loss.rst b/doc/fluid/api/nn/loss/L1Loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..161cb38c80a87c4ba38ed685133a705811ec9103
--- /dev/null
+++ b/doc/fluid/api/nn/loss/L1Loss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_L1Loss:
+
+L1Loss
+------
+
+.. autoclass:: paddle.nn.loss.L1Loss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/NLLLoss.rst b/doc/fluid/api/nn/loss/NLLLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c1a0c26de51b8869a8eccb2150c8e5635159f1de
--- /dev/null
+++ b/doc/fluid/api/nn/loss/NLLLoss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_NLLLoss:
+
+NLLLoss
+-------------------------------
+
+.. autoclass:: paddle.nn.loss.NLLLoss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/loss/SmoothL1Loss.rst b/doc/fluid/api/nn/loss/SmoothL1Loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f5654a5587bde56359b2c59fd5c6a4df5a08208
--- /dev/null
+++ b/doc/fluid/api/nn/loss/SmoothL1Loss.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_nn_loss_SmoothL1Loss:
+
+SmoothL1Loss
+-------------------------------
+
+.. autoclass:: paddle.nn.loss.SmoothL1Loss
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/nn/lrn.rst b/doc/fluid/api/nn/lrn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..40bb92de7fac547a62a9136c453121bb05dc609e
--- /dev/null
+++ b/doc/fluid/api/nn/lrn.rst
@@ -0,0 +1,7 @@
+.. _api_nn_lrn:
+
+lrn
+-------------------------------
+:doc_source: paddle.fluid.layers.lrn
+
+
diff --git a/doc/fluid/api/nn/matrix_nms.rst b/doc/fluid/api/nn/matrix_nms.rst
new file mode 100644
index 0000000000000000000000000000000000000000..49529d0faf1118dc3c61018d1be232b5d7ff5b63
--- /dev/null
+++ b/doc/fluid/api/nn/matrix_nms.rst
@@ -0,0 +1,5 @@
+.. _api_nn_matrix_nms:
+
+matrix_nms
+-------------------------------
+:doc_source: paddle.fluid.layers.matrix_nms
diff --git a/doc/fluid/api/nn/maxout.rst b/doc/fluid/api/nn/maxout.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dab8bb5de3e7f5346e0ebb3118f34fa63152c28a
--- /dev/null
+++ b/doc/fluid/api/nn/maxout.rst
@@ -0,0 +1,7 @@
+.. _api_nn_maxout:
+
+maxout
+-------------------------------
+:doc_source: paddle.fluid.layers.maxout
+
+
diff --git a/doc/fluid/api/nn/mse_loss.rst b/doc/fluid/api/nn/mse_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..30ed19b977a9cd960c70b1a53dcc52aa28bfe16f
--- /dev/null
+++ b/doc/fluid/api/nn/mse_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_mse_loss:
+
+mse_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.mse_loss
+
+
diff --git a/doc/fluid/api/nn/multiclass_nms.rst b/doc/fluid/api/nn/multiclass_nms.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b81b4a72a50185e4b815d9b252d5747ed6890ef4
--- /dev/null
+++ b/doc/fluid/api/nn/multiclass_nms.rst
@@ -0,0 +1,7 @@
+.. _api_nn_multiclass_nms:
+
+multiclass_nms
+-------------------------------
+:doc_source: paddle.fluid.layers.multiclass_nms
+
+
diff --git a/doc/fluid/api/nn/natural_exp_decay.rst b/doc/fluid/api/nn/natural_exp_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ea89ca41ccca7e712da71ef89cf6851e17548a6
--- /dev/null
+++ b/doc/fluid/api/nn/natural_exp_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_natural_exp_decay:
+
+natural_exp_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.natural_exp_decay
+
+
diff --git a/doc/fluid/api/nn/noam_decay.rst b/doc/fluid/api/nn/noam_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc1b2e6086e96c9aeba4c2bd0d14de5ebdf9b360
--- /dev/null
+++ b/doc/fluid/api/nn/noam_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_noam_decay:
+
+noam_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.noam_decay
+
+
diff --git a/doc/fluid/api/nn/npair_loss.rst b/doc/fluid/api/nn/npair_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..029cb1d9bc127d2dbfb1d954300192e3636130bb
--- /dev/null
+++ b/doc/fluid/api/nn/npair_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_npair_loss:
+
+npair_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.npair_loss
+
+
diff --git a/doc/fluid/api/nn/one_hot.rst b/doc/fluid/api/nn/one_hot.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f017ed733490ba1226ab7e807fee14f85bc0308b
--- /dev/null
+++ b/doc/fluid/api/nn/one_hot.rst
@@ -0,0 +1,7 @@
+.. _api_nn_one_hot:
+
+one_hot
+-------------------------------
+:doc_source: paddle.fluid.one_hot
+
+
diff --git a/doc/fluid/api/nn/pad.rst b/doc/fluid/api/nn/pad.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4459de858d4d80544d92acff9ca3e62c8446e890
--- /dev/null
+++ b/doc/fluid/api/nn/pad.rst
@@ -0,0 +1,7 @@
+.. _api_nn_pad:
+
+pad
+-------------------------------
+:doc_source: paddle.nn.functional.pad
+
+
diff --git a/doc/fluid/api/nn/pad2d.rst b/doc/fluid/api/nn/pad2d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..46004778395c7d080549eaa190d3539a78734a15
--- /dev/null
+++ b/doc/fluid/api/nn/pad2d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_pad2d:
+
+pad2d
+-------------------------------
+:doc_source: paddle.fluid.layers.pad2d
+
+
diff --git a/doc/fluid/api/nn/pad_constant_like.rst b/doc/fluid/api/nn/pad_constant_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb37618896562f128fd0e0a46a7f3422b6c13891
--- /dev/null
+++ b/doc/fluid/api/nn/pad_constant_like.rst
@@ -0,0 +1,7 @@
+.. _api_nn_pad_constant_like:
+
+pad_constant_like
+-------------------------------
+:doc_source: paddle.fluid.layers.pad_constant_like
+
+
diff --git a/doc/fluid/api/nn/piecewise_decay.rst b/doc/fluid/api/nn/piecewise_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3fedfda0477f2e46eca6a0ab22c92a1f54a21039
--- /dev/null
+++ b/doc/fluid/api/nn/piecewise_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_piecewise_decay:
+
+piecewise_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.piecewise_decay
+
+
diff --git a/doc/fluid/api/nn/pixel_shuffle.rst b/doc/fluid/api/nn/pixel_shuffle.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d10cd106f3d1fb5ae71fd4ef700531e6ee1f253f
--- /dev/null
+++ b/doc/fluid/api/nn/pixel_shuffle.rst
@@ -0,0 +1,7 @@
+.. _api_nn_pixel_shuffle:
+
+pixel_shuffle
+-------------------------------
+:doc_source: paddle.fluid.layers.pixel_shuffle
+
+
diff --git a/doc/fluid/api/nn/polygon_box_transform.rst b/doc/fluid/api/nn/polygon_box_transform.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a993817b38a16227a04d1553f39881ea38ca8ef1
--- /dev/null
+++ b/doc/fluid/api/nn/polygon_box_transform.rst
@@ -0,0 +1,7 @@
+.. _api_nn_polygon_box_transform:
+
+polygon_box_transform
+-------------------------------
+:doc_source: paddle.fluid.layers.polygon_box_transform
+
+
diff --git a/doc/fluid/api/nn/polynomial_decay.rst b/doc/fluid/api/nn/polynomial_decay.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5f810058e42e80c9f6aab7d2b72ab3b615e16135
--- /dev/null
+++ b/doc/fluid/api/nn/polynomial_decay.rst
@@ -0,0 +1,7 @@
+.. _api_nn_polynomial_decay:
+
+polynomial_decay
+-------------------------------
+:doc_source: paddle.fluid.layers.polynomial_decay
+
+
diff --git a/doc/fluid/api/nn/pool3d.rst b/doc/fluid/api/nn/pool3d.rst
new file mode 100644
index 0000000000000000000000000000000000000000..efc4abfb951ac7d79550d86adec725bb888aceaa
--- /dev/null
+++ b/doc/fluid/api/nn/pool3d.rst
@@ -0,0 +1,7 @@
+.. _api_nn_pool3d:
+
+pool3d
+-------------------------------
+:doc_source: paddle.fluid.layers.pool3d
+
+
diff --git a/doc/fluid/api/nn/prelu.rst b/doc/fluid/api/nn/prelu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9de04524eca18ea1b1ad3d71676bae0a5c64a273
--- /dev/null
+++ b/doc/fluid/api/nn/prelu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_prelu:
+
+prelu
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.prelu
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/nn/prior_box.rst b/doc/fluid/api/nn/prior_box.rst
new file mode 100644
index 0000000000000000000000000000000000000000..774f0d16865392229eb2f3cb71dee466ddf6109f
--- /dev/null
+++ b/doc/fluid/api/nn/prior_box.rst
@@ -0,0 +1,7 @@
+.. _api_nn_prior_box:
+
+prior_box
+-------------------------------
+:doc_source: paddle.fluid.layers.prior_box
+
+
diff --git a/doc/fluid/api/nn/prroi_pool.rst b/doc/fluid/api/nn/prroi_pool.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b65c861e3fc21ea59995f4c7e30c820aa44f23ae
--- /dev/null
+++ b/doc/fluid/api/nn/prroi_pool.rst
@@ -0,0 +1,7 @@
+.. _api_nn_prroi_pool:
+
+prroi_pool
+-------------------------------
+:doc_source: paddle.fluid.layers.prroi_pool
+
+
diff --git a/doc/fluid/api/nn/psroi_pool.rst b/doc/fluid/api/nn/psroi_pool.rst
new file mode 100644
index 0000000000000000000000000000000000000000..57dbd9aff8936a83f4cce749b8819492c2acdc0c
--- /dev/null
+++ b/doc/fluid/api/nn/psroi_pool.rst
@@ -0,0 +1,7 @@
+.. _api_nn_psroi_pool:
+
+psroi_pool
+-------------------------------
+:doc_source: paddle.fluid.layers.psroi_pool
+
+
diff --git a/doc/fluid/api/nn/random_crop.rst b/doc/fluid/api/nn/random_crop.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a20767bd9dc3d7be494f890e4109be41e7d7f8f
--- /dev/null
+++ b/doc/fluid/api/nn/random_crop.rst
@@ -0,0 +1,7 @@
+.. _api_nn_random_crop:
+
+random_crop
+-------------------------------
+:doc_source: paddle.fluid.layers.random_crop
+
+
diff --git a/doc/fluid/api/nn/rank_loss.rst b/doc/fluid/api/nn/rank_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67bbd6cf8732fee639da0e582ca57a3ed8050616
--- /dev/null
+++ b/doc/fluid/api/nn/rank_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_rank_loss:
+
+rank_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.rank_loss
+
+
diff --git a/doc/fluid/api/nn/relu.rst b/doc/fluid/api/nn/relu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b186a2b174d77ad0fc635d169fa065b659358c99
--- /dev/null
+++ b/doc/fluid/api/nn/relu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ReLU:
+
+ReLU
+-------------------------------
+:doc_source: paddle.fluid.layers.relu
+
+
diff --git a/doc/fluid/api/nn/relu6.rst b/doc/fluid/api/nn/relu6.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1346795b4166c57470ea65a96e36e08e9e5b879d
--- /dev/null
+++ b/doc/fluid/api/nn/relu6.rst
@@ -0,0 +1,9 @@
+.. _api_nn_relu6:
+
+relu6
+----------
+
+.. autofunction:: paddle.nn.functional.relu6
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/resize_bilinear.rst b/doc/fluid/api/nn/resize_bilinear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d07a29455d0d5480eaddf3a2e2bcfe7eb6897ad
--- /dev/null
+++ b/doc/fluid/api/nn/resize_bilinear.rst
@@ -0,0 +1,7 @@
+.. _api_nn_resize_bilinear:
+
+resize_bilinear
+-------------------------------
+:doc_source: paddle.fluid.layers.resize_bilinear
+
+
diff --git a/doc/fluid/api/nn/resize_nearest.rst b/doc/fluid/api/nn/resize_nearest.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ce415988779dac88cf8179da59913a0fc49a99fb
--- /dev/null
+++ b/doc/fluid/api/nn/resize_nearest.rst
@@ -0,0 +1,7 @@
+.. _api_nn_resize_nearest:
+
+resize_nearest
+-------------------------------
+:doc_source: paddle.fluid.layers.resize_nearest
+
+
diff --git a/doc/fluid/api/nn/resize_trilinear.rst b/doc/fluid/api/nn/resize_trilinear.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f138279c3a0ecb287e27fdb495786d8fe625c6a4
--- /dev/null
+++ b/doc/fluid/api/nn/resize_trilinear.rst
@@ -0,0 +1,7 @@
+.. _api_nn_resize_trilinear:
+
+resize_trilinear
+-------------------------------
+:doc_source: paddle.fluid.layers.resize_trilinear
+
+
diff --git a/doc/fluid/api/nn/retinanet_detection_output.rst b/doc/fluid/api/nn/retinanet_detection_output.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7e87d6d6e2508ed207478d604358ad98e9a325d5
--- /dev/null
+++ b/doc/fluid/api/nn/retinanet_detection_output.rst
@@ -0,0 +1,7 @@
+.. _api_nn_retinanet_detection_output:
+
+retinanet_detection_output
+-------------------------------
+:doc_source: paddle.fluid.layers.retinanet_detection_output
+
+
diff --git a/doc/fluid/api/nn/retinanet_target_assign.rst b/doc/fluid/api/nn/retinanet_target_assign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..35ddef7d38f4e59c7764004c0ae729865ee7cc8b
--- /dev/null
+++ b/doc/fluid/api/nn/retinanet_target_assign.rst
@@ -0,0 +1,7 @@
+.. _api_nn_retinanet_target_assign:
+
+retinanet_target_assign
+-------------------------------
+:doc_source: paddle.fluid.layers.retinanet_target_assign
+
+
diff --git a/doc/fluid/api/nn/roi_align.rst b/doc/fluid/api/nn/roi_align.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8de66d0ca330b5c00502d6b31d685d769f42616
--- /dev/null
+++ b/doc/fluid/api/nn/roi_align.rst
@@ -0,0 +1,7 @@
+.. _api_nn_roi_align:
+
+roi_align
+-------------------------------
+:doc_source: paddle.fluid.layers.roi_align
+
+
diff --git a/doc/fluid/api/nn/roi_perspective_transform.rst b/doc/fluid/api/nn/roi_perspective_transform.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a8a1f3b6456f806822f818dbb6e8dfa890edd17b
--- /dev/null
+++ b/doc/fluid/api/nn/roi_perspective_transform.rst
@@ -0,0 +1,7 @@
+.. _api_nn_roi_perspective_transform:
+
+roi_perspective_transform
+-------------------------------
+:doc_source: paddle.fluid.layers.roi_perspective_transform
+
+
diff --git a/doc/fluid/api/nn/roi_pool.rst b/doc/fluid/api/nn/roi_pool.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c5df77edae18d5abfae0683e1706b6f86a12b18e
--- /dev/null
+++ b/doc/fluid/api/nn/roi_pool.rst
@@ -0,0 +1,7 @@
+.. _api_nn_roi_pool:
+
+roi_pool
+-------------------------------
+:doc_source: paddle.fluid.layers.roi_pool
+
+
diff --git a/doc/fluid/api/nn/row_conv.rst b/doc/fluid/api/nn/row_conv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..93eb2dc4dd89a038653e9cba728cff085e713e70
--- /dev/null
+++ b/doc/fluid/api/nn/row_conv.rst
@@ -0,0 +1,7 @@
+.. _api_nn_row_conv:
+
+row_conv
+-------------------------------
+:doc_source: paddle.fluid.layers.row_conv
+
+
diff --git a/doc/fluid/api/nn/rpn_target_assign.rst b/doc/fluid/api/nn/rpn_target_assign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..91c2213e7bed2b094f64fce29d51720b97304e19
--- /dev/null
+++ b/doc/fluid/api/nn/rpn_target_assign.rst
@@ -0,0 +1,7 @@
+.. _api_nn_rpn_target_assign:
+
+rpn_target_assign
+-------------------------------
+:doc_source: paddle.fluid.layers.rpn_target_assign
+
+
diff --git a/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst b/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a9619939163ac5bc1ed64e24d197975bc81ce28f
--- /dev/null
+++ b/doc/fluid/api/nn/sampled_softmax_with_cross_entropy.rst
@@ -0,0 +1,7 @@
+.. _api_nn_sampled_softmax_with_cross_entropy:
+
+sampled_softmax_with_cross_entropy
+----------------------------------
+:doc_source: paddle.fluid.layers.sampled_softmax_with_cross_entropy
+
+
diff --git a/doc/fluid/api/nn/selu.rst b/doc/fluid/api/nn/selu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cce236e7c93fec18b2eba99ae55a107e4a556a39
--- /dev/null
+++ b/doc/fluid/api/nn/selu.rst
@@ -0,0 +1,9 @@
+.. _api_nn_selu:
+
+selu
+----------
+
+.. autofunction:: paddle.nn.functional.selu
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/shuffle_channel.rst b/doc/fluid/api/nn/shuffle_channel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..746eadbd98b7974655993b4c871f1834e6b6a386
--- /dev/null
+++ b/doc/fluid/api/nn/shuffle_channel.rst
@@ -0,0 +1,7 @@
+.. _api_nn_shuffle_channel:
+
+shuffle_channel
+-------------------------------
+:doc_source: paddle.fluid.layers.shuffle_channel
+
+
diff --git a/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst b/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a6b2fef68f11a716eef991c239d9c3eeb8701b1e
--- /dev/null
+++ b/doc/fluid/api/nn/sigmoid_cross_entropy_with_logits.rst
@@ -0,0 +1,7 @@
+.. _api_nn_sigmoid_cross_entropy_with_logits:
+
+sigmoid_cross_entropy_with_logits
+---------------------------------
+:doc_source: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
+
+
diff --git a/doc/fluid/api/nn/sigmoid_focal_loss.rst b/doc/fluid/api/nn/sigmoid_focal_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ea53101ffe367a8233922049413ad78b8690ebc
--- /dev/null
+++ b/doc/fluid/api/nn/sigmoid_focal_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_sigmoid_focal_loss:
+
+sigmoid_focal_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.sigmoid_focal_loss
+
+
diff --git a/doc/fluid/api/nn/similarity_focus.rst b/doc/fluid/api/nn/similarity_focus.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f3bacb72cfcb2ec7a33b5d38f8df8748212f62d
--- /dev/null
+++ b/doc/fluid/api/nn/similarity_focus.rst
@@ -0,0 +1,7 @@
+.. _api_nn_similarity_focus:
+
+similarity_focus
+-------------------------------
+:doc_source: paddle.fluid.layers.similarity_focus
+
+
diff --git a/doc/fluid/api/nn/smooth_l1.rst b/doc/fluid/api/nn/smooth_l1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6329397aed97a52ae3b2adeb0b0f5456c2f925d5
--- /dev/null
+++ b/doc/fluid/api/nn/smooth_l1.rst
@@ -0,0 +1,7 @@
+.. _api_nn_smooth_l1:
+
+smooth_l1
+-------------------------------
+:doc_source: paddle.fluid.layers.smooth_l1
+
+
diff --git a/doc/fluid/api/nn/soft_relu.rst b/doc/fluid/api/nn/soft_relu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bcf0ad835e3054a58bc833f73f671a58be48e638
--- /dev/null
+++ b/doc/fluid/api/nn/soft_relu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_soft_relu:
+
+soft_relu
+-------------------------------
+:doc_source: paddle.fluid.layers.soft_relu
+
+
diff --git a/doc/fluid/api/nn/softmax.rst b/doc/fluid/api/nn/softmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f97889cdd33799cec1af362a0a22c47de6ba0feb
--- /dev/null
+++ b/doc/fluid/api/nn/softmax.rst
@@ -0,0 +1,8 @@
+.. _api_nn_softmax:
+
+softmax
+-------------------------------
+
+.. autofunction:: paddle.nn.functional.softmax
+ :noindex:
+
diff --git a/doc/fluid/api/nn/softmax_with_cross_entropy.rst b/doc/fluid/api/nn/softmax_with_cross_entropy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fe70c8fd2ab487e9342336740b267b1b51edd729
--- /dev/null
+++ b/doc/fluid/api/nn/softmax_with_cross_entropy.rst
@@ -0,0 +1,7 @@
+.. _api_nn_softmax_with_cross_entropy:
+
+softmax_with_cross_entropy
+-------------------------------
+:doc_source: paddle.fluid.layers.softmax_with_cross_entropy
+
+
diff --git a/doc/fluid/api/nn/softplus.rst b/doc/fluid/api/nn/softplus.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c059b78c40d9ef5c7e7ce3fead8abda28e26d5f
--- /dev/null
+++ b/doc/fluid/api/nn/softplus.rst
@@ -0,0 +1,9 @@
+.. _api_nn_softplus:
+
+softplus
+----------
+
+.. autofunction:: paddle.nn.functional.softplus
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/softshrink.rst b/doc/fluid/api/nn/softshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dce2b14edd22aa12df3eb6dc348a9d031d9de79d
--- /dev/null
+++ b/doc/fluid/api/nn/softshrink.rst
@@ -0,0 +1,9 @@
+.. _api_nn_softshrink:
+
+softshrink
+----------
+
+.. autofunction:: paddle.nn.functional.softshrink
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/softsign.rst b/doc/fluid/api/nn/softsign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f2b85114794a14b3df7bab955bef7d7dd1bb06b3
--- /dev/null
+++ b/doc/fluid/api/nn/softsign.rst
@@ -0,0 +1,8 @@
+.. _api_nn_softsign:
+
+softsign
+----------
+
+.. autofunction:: paddle.nn.functional.softsign
+ :noindex:
+
diff --git a/doc/fluid/api/nn/space_to_depth.rst b/doc/fluid/api/nn/space_to_depth.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a68d52a828b6492b522381ce4d6490d2da93e18a
--- /dev/null
+++ b/doc/fluid/api/nn/space_to_depth.rst
@@ -0,0 +1,7 @@
+.. _api_nn_space_to_depth:
+
+space_to_depth
+-------------------------------
+:doc_source: paddle.fluid.layers.space_to_depth
+
+
diff --git a/doc/fluid/api/nn/square_error_cost.rst b/doc/fluid/api/nn/square_error_cost.rst
new file mode 100644
index 0000000000000000000000000000000000000000..efaba14bd90383bf17ffca190d5f49cb4ac44e7f
--- /dev/null
+++ b/doc/fluid/api/nn/square_error_cost.rst
@@ -0,0 +1,7 @@
+.. _api_nn_square_error_cost:
+
+square_error_cost
+-------------------------------
+:doc_source: paddle.fluid.layers.square_error_cost
+
+
diff --git a/doc/fluid/api/nn/ssd_loss.rst b/doc/fluid/api/nn/ssd_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c2eaab6fabf69930b9a761a059412e59ce135f6c
--- /dev/null
+++ b/doc/fluid/api/nn/ssd_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_ssd_loss:
+
+ssd_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.ssd_loss
+
+
diff --git a/doc/fluid/api/nn/swish.rst b/doc/fluid/api/nn/swish.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5c06008d5085e24b99cdbeb4ffb32b3daf23169
--- /dev/null
+++ b/doc/fluid/api/nn/swish.rst
@@ -0,0 +1,7 @@
+.. _api_nn_swish:
+
+swish
+-------------------------------
+:doc_source: paddle.fluid.layers.swish
+
+
diff --git a/doc/fluid/api/nn/switch_case.rst b/doc/fluid/api/nn/switch_case.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a5e260a6a26b7b78b0c4b1058ebb7dd8e8221650
--- /dev/null
+++ b/doc/fluid/api/nn/switch_case.rst
@@ -0,0 +1,7 @@
+.. _api_nn_switch_case:
+
+switch_case
+-------------------------------
+:doc_source: paddle.fluid.layers.switch_case
+
+
diff --git a/doc/fluid/api/nn/tanhshrink.rst b/doc/fluid/api/nn/tanhshrink.rst
new file mode 100644
index 0000000000000000000000000000000000000000..88160bcc6098f5566f86a4128a4f8a09e79384cc
--- /dev/null
+++ b/doc/fluid/api/nn/tanhshrink.rst
@@ -0,0 +1,9 @@
+.. _api_nn_tanhshrink:
+
+tanhshrink
+-----------
+
+.. autofunction:: paddle.nn.functional.tanhshrink
+ :noindex:
+
+
diff --git a/doc/fluid/api/nn/target_assign.rst b/doc/fluid/api/nn/target_assign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0a1337fafa87f830fa70458cafdad5fea62274df
--- /dev/null
+++ b/doc/fluid/api/nn/target_assign.rst
@@ -0,0 +1,7 @@
+.. _api_nn_target_assign:
+
+target_assign
+-------------------------------
+:doc_source: paddle.fluid.layers.target_assign
+
+
diff --git a/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst b/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d31dbd96f7cb4abb70d94e9dde8927c6e7cea3d3
--- /dev/null
+++ b/doc/fluid/api/nn/teacher_student_sigmoid_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_teacher_student_sigmoid_loss:
+
+teacher_student_sigmoid_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.teacher_student_sigmoid_loss
+
+
diff --git a/doc/fluid/api/nn/temporal_shift.rst b/doc/fluid/api/nn/temporal_shift.rst
new file mode 100644
index 0000000000000000000000000000000000000000..85eb76bad6ad68e0267f9a77b8d893040133ee6b
--- /dev/null
+++ b/doc/fluid/api/nn/temporal_shift.rst
@@ -0,0 +1,7 @@
+.. _api_nn_temporal_shift:
+
+temporal_shift
+-------------------------------
+:doc_source: paddle.fluid.layers.temporal_shift
+
+
diff --git a/doc/fluid/api/nn/thresholded_relu.rst b/doc/fluid/api/nn/thresholded_relu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b35c9d260c25f4b47d487e0d0b6eec1e43540937
--- /dev/null
+++ b/doc/fluid/api/nn/thresholded_relu.rst
@@ -0,0 +1,7 @@
+.. _api_nn_thresholded_relu:
+
+thresholded_relu
+-------------------------------
+:doc_source: paddle.fluid.layers.thresholded_relu
+
+
diff --git a/doc/fluid/api/nn/unfold.rst b/doc/fluid/api/nn/unfold.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9427a16eeda20d5c68542257f8ca08d6c8ac2f3a
--- /dev/null
+++ b/doc/fluid/api/nn/unfold.rst
@@ -0,0 +1,7 @@
+.. _api_nn_unfold:
+
+unfold
+-------------------------------
+:doc_source: paddle.fluid.layers.unfold
+
+
diff --git a/doc/fluid/api/nn/warpctc.rst b/doc/fluid/api/nn/warpctc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a7d791d58a4e985f620be9b43d2134422be9f8c8
--- /dev/null
+++ b/doc/fluid/api/nn/warpctc.rst
@@ -0,0 +1,7 @@
+.. _api_nn_warpctc:
+
+warpctc
+-------------------------------
+:doc_source: paddle.fluid.layers.warpctc
+
+
diff --git a/doc/fluid/api/nn/while_loop.rst b/doc/fluid/api/nn/while_loop.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9083e5c9184fb2b73462ccccac0c3ec74408ca1e
--- /dev/null
+++ b/doc/fluid/api/nn/while_loop.rst
@@ -0,0 +1,7 @@
+.. _api_nn_while_loop:
+
+while_loop
+-------------------------------
+:doc_source: paddle.fluid.layers.while_loop
+
+
diff --git a/doc/fluid/api/nn/yolo_box.rst b/doc/fluid/api/nn/yolo_box.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc6394dc027fea0a9b66172a221f921b2727c7a2
--- /dev/null
+++ b/doc/fluid/api/nn/yolo_box.rst
@@ -0,0 +1,7 @@
+.. _api_nn_yolo_box:
+
+yolo_box
+-------------------------------
+:doc_source: paddle.fluid.layers.yolo_box
+
+
diff --git a/doc/fluid/api/nn/yolov3_loss.rst b/doc/fluid/api/nn/yolov3_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6b47f3c3b640725c35fcd34b6333782fdfb8a0a0
--- /dev/null
+++ b/doc/fluid/api/nn/yolov3_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_yolov3_loss:
+
+yolov3_loss
+-------------------------------
+:doc_source: paddle.fluid.layers.yolov3_loss
+
+
diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst
index 6f0377a58632782c3f12ccfbad658224286af3b5..b71f55f55e9791c4d0b5790449965aa6f4d4098e 100644
--- a/doc/fluid/api/optimizer.rst
+++ b/doc/fluid/api/optimizer.rst
@@ -1,6 +1,6 @@
-===============
-fluid.optimizer
-===============
+======================
+paddle.optimizer
+======================
.. toctree::
:maxdepth: 1
@@ -11,8 +11,7 @@ fluid.optimizer
optimizer/AdagradOptimizer.rst
optimizer/Adam.rst
optimizer/Adamax.rst
- optimizer/AdamaxOptimizer.rst
- optimizer/AdamOptimizer.rst
+ optimizer/AdamW.rst
optimizer/DecayedAdagrad.rst
optimizer/DecayedAdagradOptimizer.rst
optimizer/DGCMomentumOptimizer.rst
@@ -28,8 +27,20 @@ fluid.optimizer
optimizer/ModelAverage.rst
optimizer/Momentum.rst
optimizer/MomentumOptimizer.rst
- optimizer/PipelineOptimizer.rst
optimizer/RecomputeOptimizer.rst
- optimizer/RMSPropOptimizer.rst
+ optimizer/RMSProp.rst
optimizer/SGD.rst
optimizer/SGDOptimizer.rst
+ optimizer/Optimizer.rst
+ optimizer/NoamLR.rst
+ optimizer/PiecewiseLR.rst
+ optimizer/NaturalExpLR.rst
+ optimizer/InverseTimeLR.rst
+ optimizer/PolynomialLR.rst
+ optimizer/LinearLrWarmup.rst
+ optimizer/ExponentialLR.rst
+ optimizer/MultiStepLR.rst
+ optimizer/StepLR.rst
+ optimizer/LambdaLR.rst
+ optimizer/ReduceLROnPlateau.rst
+ optimizer/CosineAnnealingLR.rst
diff --git a/doc/fluid/api/optimizer/Adadelta.rst b/doc/fluid/api/optimizer/Adadelta.rst
index cba6c6fc6f6f5743c883b9be706e7ded8355531a..3d4d505c5a37304dfb9a1827a07900f392b06eb3 100644
--- a/doc/fluid/api/optimizer/Adadelta.rst
+++ b/doc/fluid/api/optimizer/Adadelta.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Adadelta:
+.. _api_optimizer_Adadelta:
Adadelta
--------
-.. autoclass:: paddle.fluid.optimizer.Adadelta
+.. autoclass:: paddle.optimizer.Adadelta
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst
index 14692902f21bafcac34ca0460de71576f47cfb5b..160a64b4d2d9bc0496c857e42c96295650ebef89 100644
--- a/doc/fluid/api/optimizer/AdadeltaOptimizer.rst
+++ b/doc/fluid/api/optimizer/AdadeltaOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_AdadeltaOptimizer:
+.. _api_optimizer_AdadeltaOptimizer:
AdadeltaOptimizer
-----------------
-.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer
+.. autoclass:: paddle.optimizer.AdadeltaOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/Adagrad.rst b/doc/fluid/api/optimizer/Adagrad.rst
index b955fa9f7df88b14eb749695f9af6e4c5e704f19..deef2879fbe595a7b8f81980e729c35f9d5d4209 100644
--- a/doc/fluid/api/optimizer/Adagrad.rst
+++ b/doc/fluid/api/optimizer/Adagrad.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Adagrad:
+.. _api_optimizer_Adagrad:
Adagrad
-------
-.. autoclass:: paddle.fluid.optimizer.Adagrad
+.. autoclass:: paddle.optimizer.Adagrad
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/AdagradOptimizer.rst b/doc/fluid/api/optimizer/AdagradOptimizer.rst
index e52a9de102f86f386ceb38c65325e7ac53c34982..e125000739edf62aedfca8f40e3782730f1ca0c9 100644
--- a/doc/fluid/api/optimizer/AdagradOptimizer.rst
+++ b/doc/fluid/api/optimizer/AdagradOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_AdagradOptimizer:
+.. _api_optimizer_AdagradOptimizer:
AdagradOptimizer
----------------
-.. autoclass:: paddle.fluid.optimizer.AdagradOptimizer
+.. autoclass:: paddle.optimizer.AdagradOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/Adam.rst b/doc/fluid/api/optimizer/Adam.rst
index 4c7f6fd062f5b25d7202554eab1dc01546cebc14..f9bbe07dc855d9610e8c85892bd188b91cbd6aa4 100644
--- a/doc/fluid/api/optimizer/Adam.rst
+++ b/doc/fluid/api/optimizer/Adam.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Adam:
+.. _api_optimizer_Adam:
Adam
----
-.. autoclass:: paddle.fluid.optimizer.Adam
+.. autoclass:: paddle.optimizer.Adam
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/AdamOptimizer.rst b/doc/fluid/api/optimizer/AdamOptimizer.rst
deleted file mode 100644
index 9a966f54c29bbef6153c099e373797deb3ae8995..0000000000000000000000000000000000000000
--- a/doc/fluid/api/optimizer/AdamOptimizer.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_optimizer_AdamOptimizer:
-
-AdamOptimizer
--------------
-
-.. autoclass:: paddle.fluid.optimizer.AdamOptimizer
- :members:
- :inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
- :noindex:
-
diff --git a/doc/fluid/api/optimizer/AdamW.rst b/doc/fluid/api/optimizer/AdamW.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c76eb48cdf1bcd7f4167d02cead45a0b7434ddae
--- /dev/null
+++ b/doc/fluid/api/optimizer/AdamW.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_AdamW:
+
+AdamW
+-----
+
+.. autoclass:: paddle.optimizer.AdamW
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/Adamax.rst b/doc/fluid/api/optimizer/Adamax.rst
index 7f0ed4935516d67a9944ad633219ccc1fdcd4753..36fb8509f0b596dca21a1c2fcc6b12c1e3b77fe0 100644
--- a/doc/fluid/api/optimizer/Adamax.rst
+++ b/doc/fluid/api/optimizer/Adamax.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Adamax:
+.. _api_optimizer_Adamax:
Adamax
------
-.. autoclass:: paddle.fluid.optimizer.Adamax
+.. autoclass:: paddle.optimizer.Adamax
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/AdamaxOptimizer.rst b/doc/fluid/api/optimizer/AdamaxOptimizer.rst
deleted file mode 100644
index b27b7aab44cb76f611ffa2e5512dd5e98597a602..0000000000000000000000000000000000000000
--- a/doc/fluid/api/optimizer/AdamaxOptimizer.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_optimizer_AdamaxOptimizer:
-
-AdamaxOptimizer
----------------
-
-.. autoclass:: paddle.fluid.optimizer.AdamaxOptimizer
- :members:
- :inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
- :noindex:
-
diff --git a/doc/fluid/api/optimizer/CosineAnnealingLR.rst b/doc/fluid/api/optimizer/CosineAnnealingLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..508ed824d4c8519e1096dc4a4442378b2f4b784f
--- /dev/null
+++ b/doc/fluid/api/optimizer/CosineAnnealingLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_CosineAnnealingLR:
+
+CosineAnnealingLR
+-------------------
+
+.. autoclass:: paddle.optimizer.CosineAnnealingLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst
index 2305e30ef77ec39ab0341d8d940fdcc447d0f4f6..aa7a3517c38066965084b2bc990621b7f494a008 100644
--- a/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst
+++ b/doc/fluid/api/optimizer/DGCMomentumOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_DGCMomentumOptimizer:
+.. _api_optimizer_DGCMomentumOptimizer:
DGCMomentumOptimizer
--------------------
-.. autoclass:: paddle.fluid.optimizer.DGCMomentumOptimizer
+.. autoclass:: paddle.optimizer.DGCMomentumOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/DecayedAdagrad.rst b/doc/fluid/api/optimizer/DecayedAdagrad.rst
index f2b37dda5cf323328d34005e49b36ccfa2436051..e3f1c574d8b236ed6e5883030b2793f6056ce996 100644
--- a/doc/fluid/api/optimizer/DecayedAdagrad.rst
+++ b/doc/fluid/api/optimizer/DecayedAdagrad.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_DecayedAdagrad:
+.. _api_optimizer_DecayedAdagrad:
DecayedAdagrad
--------------
-.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad
+.. autoclass:: paddle.optimizer.DecayedAdagrad
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst
index f0aa277010fcbbc11642be3f6f50f97e1c5a7197..cf0d4452bec9dfa944cd1e8cf365ae899e10cf1c 100644
--- a/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst
+++ b/doc/fluid/api/optimizer/DecayedAdagradOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_DecayedAdagradOptimizer:
+.. _api_optimizer_DecayedAdagradOptimizer:
DecayedAdagradOptimizer
-----------------------
-.. autoclass:: paddle.fluid.optimizer.DecayedAdagradOptimizer
+.. autoclass:: paddle.optimizer.DecayedAdagradOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/Dpsgd.rst b/doc/fluid/api/optimizer/Dpsgd.rst
index 161606af2c53bb8766141f0a703f2af312ccd55b..f8fbfbf653ab191facfd0b9ca032bc334daa84b6 100644
--- a/doc/fluid/api/optimizer/Dpsgd.rst
+++ b/doc/fluid/api/optimizer/Dpsgd.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Dpsgd:
+.. _api_optimizer_Dpsgd:
Dpsgd
-----
-.. autoclass:: paddle.fluid.optimizer.Dpsgd
+.. autoclass:: paddle.optimizer.Dpsgd
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/DpsgdOptimizer.rst b/doc/fluid/api/optimizer/DpsgdOptimizer.rst
index d2462515a648507e296f7dd31da44c7aaeb3633f..cbb1b8a92780f48665247e8627429914b7f92740 100644
--- a/doc/fluid/api/optimizer/DpsgdOptimizer.rst
+++ b/doc/fluid/api/optimizer/DpsgdOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_DpsgdOptimizer:
+.. _api_optimizer_DpsgdOptimizer:
DpsgdOptimizer
--------------
-.. autoclass:: paddle.fluid.optimizer.DpsgdOptimizer
+.. autoclass:: paddle.optimizer.DpsgdOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/ExponentialLR.rst b/doc/fluid/api/optimizer/ExponentialLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5a22fb820347b0bf30b2eb25ab9cace7230e60f1
--- /dev/null
+++ b/doc/fluid/api/optimizer/ExponentialLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_ExponentialLR:
+
+ExponentialLR
+-------------------
+
+.. autoclass:: paddle.optimizer.ExponentialLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst
index 41f2b39ae95e9cd4a2598863e13ad455d7bb81dc..173608910d27c20f582bfaae90316214d994867a 100644
--- a/doc/fluid/api/optimizer/ExponentialMovingAverage.rst
+++ b/doc/fluid/api/optimizer/ExponentialMovingAverage.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_ExponentialMovingAverage:
+.. _api_optimizer_ExponentialMovingAverage:
ExponentialMovingAverage
------------------------
-.. autoclass:: paddle.fluid.optimizer.ExponentialMovingAverage
+.. autoclass:: paddle.optimizer.ExponentialMovingAverage
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/Ftrl.rst b/doc/fluid/api/optimizer/Ftrl.rst
index f8bcb617dbf81a51ac2c35b7f7bdd2f50b90c39e..85a5ab6eee34296f7546073cbadb9ac5ea6044eb 100644
--- a/doc/fluid/api/optimizer/Ftrl.rst
+++ b/doc/fluid/api/optimizer/Ftrl.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Ftrl:
+.. _api_optimizer_Ftrl:
Ftrl
----
-.. autoclass:: paddle.fluid.optimizer.Ftrl
+.. autoclass:: paddle.optimizer.Ftrl
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/FtrlOptimizer.rst b/doc/fluid/api/optimizer/FtrlOptimizer.rst
index 3875801869f6e2dd70b54a222f047fe4339fb27f..fcbbcc52eafc471cb3604b9183e71d34210c7667 100644
--- a/doc/fluid/api/optimizer/FtrlOptimizer.rst
+++ b/doc/fluid/api/optimizer/FtrlOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_FtrlOptimizer:
+.. _api_optimizer_FtrlOptimizer:
FtrlOptimizer
-------------
-.. autoclass:: paddle.fluid.optimizer.FtrlOptimizer
+.. autoclass:: paddle.optimizer.FtrlOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/InverseTimeLR.rst b/doc/fluid/api/optimizer/InverseTimeLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..34fa8859a463d05ccf15161ae974c4cd598df4d2
--- /dev/null
+++ b/doc/fluid/api/optimizer/InverseTimeLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_InverseTimeLR:
+
+InverseTimeLR
+-------------------
+
+.. autoclass:: paddle.optimizer.InverseTimeLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/LambOptimizer.rst b/doc/fluid/api/optimizer/LambOptimizer.rst
index db8fc7153c7bc766f03faa7d758ed080e2ea2ca8..f661af2276be2f2b406847fba6cc6043bbd5f0d6 100644
--- a/doc/fluid/api/optimizer/LambOptimizer.rst
+++ b/doc/fluid/api/optimizer/LambOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_LambOptimizer:
+.. _api_optimizer_LambOptimizer:
LambOptimizer
-------------
-.. autoclass:: paddle.fluid.optimizer.LambOptimizer
+.. autoclass:: paddle.optimizer.LambOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/LambdaLR.rst b/doc/fluid/api/optimizer/LambdaLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d37bd5019e211d47c1a686d9e2af49c7f1ad684e
--- /dev/null
+++ b/doc/fluid/api/optimizer/LambdaLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_LambdaLR:
+
+LambdaLR
+-------------------
+
+.. autoclass:: paddle.optimizer.LambdaLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/LarsMomentum.rst b/doc/fluid/api/optimizer/LarsMomentum.rst
index 396f93416f830b286eb6c0f93f799523d651b2dd..199afcd78c62b1987ed06eaab71eae3542fac303 100644
--- a/doc/fluid/api/optimizer/LarsMomentum.rst
+++ b/doc/fluid/api/optimizer/LarsMomentum.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_LarsMomentum:
+.. _api_optimizer_LarsMomentum:
LarsMomentum
------------
-.. autoclass:: paddle.fluid.optimizer.LarsMomentum
+.. autoclass:: paddle.optimizer.LarsMomentum
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst
index daf1631c128d85fe9d2807694261ea740e900f8d..a19d0025d49100b912165b840361b093baad61d9 100644
--- a/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst
+++ b/doc/fluid/api/optimizer/LarsMomentumOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_LarsMomentumOptimizer:
+.. _api_optimizer_LarsMomentumOptimizer:
LarsMomentumOptimizer
---------------------
-.. autoclass:: paddle.fluid.optimizer.LarsMomentumOptimizer
+.. autoclass:: paddle.optimizer.LarsMomentumOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/LinearLrWarmup.rst b/doc/fluid/api/optimizer/LinearLrWarmup.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0fa5ebf876247210f543b22e9813f9d6c3118c5f
--- /dev/null
+++ b/doc/fluid/api/optimizer/LinearLrWarmup.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_LinearLrWarmup:
+
+LinearLrWarmup
+-------------------
+
+.. autoclass:: paddle.optimizer.LinearLrWarmup
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/LookaheadOptimizer.rst b/doc/fluid/api/optimizer/LookaheadOptimizer.rst
index e87be3eefdf307189f380e13ba548855db88df7d..663b5662ceea49052e602dfac441717543960a53 100644
--- a/doc/fluid/api/optimizer/LookaheadOptimizer.rst
+++ b/doc/fluid/api/optimizer/LookaheadOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_LookaheadOptimizer:
+.. _api_optimizer_LookaheadOptimizer:
LookaheadOptimizer
------------------
-.. autoclass:: paddle.fluid.optimizer.LookaheadOptimizer
+.. autoclass:: paddle.optimizer.LookaheadOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/ModelAverage.rst b/doc/fluid/api/optimizer/ModelAverage.rst
index 86e458ef704c18173b5586b5042768a037ba32d9..5a67fc1b195c15f9dabe32253b06f9c60017e96e 100644
--- a/doc/fluid/api/optimizer/ModelAverage.rst
+++ b/doc/fluid/api/optimizer/ModelAverage.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_ModelAverage:
+.. _api_optimizer_ModelAverage:
ModelAverage
------------
-.. autoclass:: paddle.fluid.optimizer.ModelAverage
+.. autoclass:: paddle.optimizer.ModelAverage
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/Momentum.rst b/doc/fluid/api/optimizer/Momentum.rst
index 7b54d7cc1fcbb434c929cf27a86f577372cd1d91..f0ef70be9ee60de6bdc4de18d95594db18ad1eea 100644
--- a/doc/fluid/api/optimizer/Momentum.rst
+++ b/doc/fluid/api/optimizer/Momentum.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_Momentum:
+.. _api_optimizer_Momentum:
Momentum
--------
-.. autoclass:: paddle.fluid.optimizer.Momentum
+.. autoclass:: paddle.optimizer.Momentum
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/MomentumOptimizer.rst b/doc/fluid/api/optimizer/MomentumOptimizer.rst
index 1f0c6c632918b8ed7a32c8c16a1dad642ed2e64f..495c5fe91a06890424db93158407ddf28f5854d7 100644
--- a/doc/fluid/api/optimizer/MomentumOptimizer.rst
+++ b/doc/fluid/api/optimizer/MomentumOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_MomentumOptimizer:
+.. _api_optimizer_MomentumOptimizer:
MomentumOptimizer
-----------------
-.. autoclass:: paddle.fluid.optimizer.MomentumOptimizer
+.. autoclass:: paddle.optimizer.MomentumOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/MultiStepLR.rst b/doc/fluid/api/optimizer/MultiStepLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b5ee3da3f3ec8b6a4af5308bba42f63afa2c6d75
--- /dev/null
+++ b/doc/fluid/api/optimizer/MultiStepLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_MultiStepLR:
+
+MultiStepLR
+-------------------
+
+.. autoclass:: paddle.optimizer.MultiStepLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/NaturalExpLR.rst b/doc/fluid/api/optimizer/NaturalExpLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2bb653c7d7e0364c1d77ddfa8460c29a5b7937b8
--- /dev/null
+++ b/doc/fluid/api/optimizer/NaturalExpLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_NaturalExpLR:
+
+NaturalExpLR
+-------------------
+
+.. autoclass:: paddle.optimizer.NaturalExpLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/NoamLR.rst b/doc/fluid/api/optimizer/NoamLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9abbe5997cd7970e280749e94afabc6aa99891d9
--- /dev/null
+++ b/doc/fluid/api/optimizer/NoamLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_NoamLR:
+
+NoamLR
+-------------------
+
+.. autoclass:: paddle.optimizer.NoamLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/Optimizer.rst b/doc/fluid/api/optimizer/Optimizer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1ef98eab55c524d18c27a19f951c8d081c26894a
--- /dev/null
+++ b/doc/fluid/api/optimizer/Optimizer.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_Optimizer:
+
+Optimizer
+---------
+
+.. autoclass:: paddle.optimizer.Optimizer
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/PiecewiseLR.rst b/doc/fluid/api/optimizer/PiecewiseLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e9dea7704ca393f76ceb61cd2c0359f1505a09ee
--- /dev/null
+++ b/doc/fluid/api/optimizer/PiecewiseLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_PiecewiseLR:
+
+PiecewiseLR
+-------------------
+
+.. autoclass:: paddle.optimizer.PiecewiseLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/PipelineOptimizer.rst b/doc/fluid/api/optimizer/PipelineOptimizer.rst
deleted file mode 100644
index 87e6f4026d49f4db11dec390faf325082bb1fdbe..0000000000000000000000000000000000000000
--- a/doc/fluid/api/optimizer/PipelineOptimizer.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_optimizer_PipelineOptimizer:
-
-PipelineOptimizer
------------------
-
-.. autoclass:: paddle.fluid.optimizer.PipelineOptimizer
- :members:
- :inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
- :noindex:
-
diff --git a/doc/fluid/api/optimizer/PolynomialLR.rst b/doc/fluid/api/optimizer/PolynomialLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5ce54b1517fb3407a6f4715d66afd2520809f3d8
--- /dev/null
+++ b/doc/fluid/api/optimizer/PolynomialLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_PolynomialLR:
+
+PolynomialLR
+-------------------
+
+.. autoclass:: paddle.optimizer.PolynomialLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/RMSProp.rst b/doc/fluid/api/optimizer/RMSProp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..903acc26a3316e312ea1145819aec347e57e7109
--- /dev/null
+++ b/doc/fluid/api/optimizer/RMSProp.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_RMSProp:
+
+RMSProp
+-------
+
+.. autoclass:: paddle.optimizer.RMSProp
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/RMSPropOptimizer.rst b/doc/fluid/api/optimizer/RMSPropOptimizer.rst
deleted file mode 100644
index 237c4ea71e45039063acce502b9c0bd9800e9ffd..0000000000000000000000000000000000000000
--- a/doc/fluid/api/optimizer/RMSPropOptimizer.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_optimizer_RMSPropOptimizer:
-
-RMSPropOptimizer
-----------------
-
-.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer
- :members:
- :inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
- :noindex:
-
diff --git a/doc/fluid/api/optimizer/RecomputeOptimizer.rst b/doc/fluid/api/optimizer/RecomputeOptimizer.rst
index 479037eebbb22fa8ea02951ed6e380d5753fb171..4891f58c146ef39aaec42adcd6667bba76052f8a 100644
--- a/doc/fluid/api/optimizer/RecomputeOptimizer.rst
+++ b/doc/fluid/api/optimizer/RecomputeOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_RecomputeOptimizer:
+.. _api_optimizer_RecomputeOptimizer:
RecomputeOptimizer
------------------
-.. autoclass:: paddle.fluid.optimizer.RecomputeOptimizer
+.. autoclass:: paddle.optimizer.RecomputeOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/ReduceLROnPlateau.rst b/doc/fluid/api/optimizer/ReduceLROnPlateau.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f31bfefddf714db6a7fdcacfb208014fbe791e1f
--- /dev/null
+++ b/doc/fluid/api/optimizer/ReduceLROnPlateau.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_ReduceLROnPlateau:
+
+ReduceLROnPlateau
+-------------------
+
+.. autoclass:: paddle.optimizer.ReduceLROnPlateau
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/optimizer/SGD.rst b/doc/fluid/api/optimizer/SGD.rst
index fa18269ad7536cc5aeaed1dd8f80775b3cf87775..9af1bc8d60cd30df57fcbd1b8b1ac8a00073c6df 100644
--- a/doc/fluid/api/optimizer/SGD.rst
+++ b/doc/fluid/api/optimizer/SGD.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_SGD:
+.. _api_optimizer_SGD:
SGD
---
-.. autoclass:: paddle.fluid.optimizer.SGD
+.. autoclass:: paddle.optimizer.SGD
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/SGDOptimizer.rst b/doc/fluid/api/optimizer/SGDOptimizer.rst
index c6ec7ea6fd0d64b50676584c3b60733b274986fe..e36d63d41aa762daaf0dca943fb4de91de7bfc75 100644
--- a/doc/fluid/api/optimizer/SGDOptimizer.rst
+++ b/doc/fluid/api/optimizer/SGDOptimizer.rst
@@ -1,14 +1,13 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
-.. _api_fluid_optimizer_SGDOptimizer:
+.. _api_optimizer_SGDOptimizer:
SGDOptimizer
------------
-.. autoclass:: paddle.fluid.optimizer.SGDOptimizer
+.. autoclass:: paddle.optimizer.SGDOptimizer
:members:
:inherited-members:
- :exclude-members: apply_gradients, apply_optimize, backward, load
:noindex:
diff --git a/doc/fluid/api/optimizer/StepLR.rst b/doc/fluid/api/optimizer/StepLR.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0524a33239072b4a6afee9803882e675879adb93
--- /dev/null
+++ b/doc/fluid/api/optimizer/StepLR.rst
@@ -0,0 +1,14 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_optimizer_StepLR:
+
+StepLR
+-------------------
+
+.. autoclass:: paddle.optimizer.StepLR
+ :members:
+ :inherited-members:
+ :exclude-members: set_dict, set_state_dict, state_dict
+ :noindex:
+
diff --git a/doc/fluid/api/paddle.rst b/doc/fluid/api/paddle.rst
new file mode 100644
index 0000000000000000000000000000000000000000..45d26734e0d95205876b1b586d1c24becbf522ce
--- /dev/null
+++ b/doc/fluid/api/paddle.rst
@@ -0,0 +1,184 @@
+=======================
+paddle
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ paddle/abs.rst
+ paddle/acos.rst
+ paddle/add.rst
+ paddle/addcmul.rst
+ paddle/addmm.rst
+ paddle/allclose.rst
+ paddle/append_backward.rst
+ paddle/arange.rst
+ paddle/argmax.rst
+ paddle/argmin.rst
+ paddle/argsort.rst
+ paddle/asin.rst
+ paddle/atan.rst
+ paddle/bmm.rst
+ paddle/BuildStrategy.rst
+ paddle/cast.rst
+ paddle/ceil.rst
+ paddle/cholesky.rst
+ paddle/chunk.rst
+ paddle/clamp.rst
+ paddle/CompiledProgram.rst
+ paddle/concat.rst
+ paddle/cos.rst
+ paddle/CPUPlace.rst
+ paddle/create_global_var.rst
+ paddle/create_parameter.rst
+ paddle/create_tensor.rst
+ paddle/crop_tensor.rst
+ paddle/cross.rst
+ paddle/CUDAPinnedPlace.rst
+ paddle/CUDAPlace.rst
+ paddle/cumsum.rst
+ paddle/DataParallel.rst
+ paddle/default_main_program.rst
+ paddle/default_startup_program.rst
+ paddle/diag.rst
+ paddle/disable_imperative.rst
+ paddle/dist.rst
+ paddle/distribution.rst
+ paddle/div.rst
+ paddle/dot.rst
+ paddle/elementwise_add.rst
+ paddle/elementwise_div.rst
+ paddle/elementwise_floordiv.rst
+ paddle/elementwise_mod.rst
+ paddle/elementwise_mul.rst
+ paddle/elementwise_pow.rst
+ paddle/elementwise_sub.rst
+ paddle/elementwise_sum.rst
+ paddle/enable_imperative.rst
+ paddle/equal.rst
+ paddle/equal_all.rst
+ paddle/erf.rst
+ paddle/ExecutionStrategy.rst
+ paddle/Executor.rst
+ paddle/exp.rst
+ paddle/expand.rst
+ paddle/expand_as.rst
+ paddle/eye.rst
+ paddle/fill_constant.rst
+ paddle/flatten.rst
+ paddle/flip.rst
+ paddle/floor.rst
+ paddle/full.rst
+ paddle/full_like.rst
+ paddle/gather.rst
+ paddle/gather_nd.rst
+ paddle/global_scope.rst
+ paddle/gradients.rst
+ paddle/greater_equal.rst
+ paddle/greater_than.rst
+ paddle/has_inf.rst
+ paddle/has_nan.rst
+ paddle/in_imperative_mode.rst
+ paddle/increment.rst
+ paddle/index_sample.rst
+ paddle/index_select.rst
+ paddle/inverse.rst
+ paddle/is_empty.rst
+ paddle/isfinite.rst
+ paddle/kron.rst
+ paddle/less_equal.rst
+ paddle/less_than.rst
+ paddle/linspace.rst
+ paddle/load.rst
+ paddle/log.rst
+ paddle/log1p.rst
+ paddle/logical_and.rst
+ paddle/logical_not.rst
+ paddle/logical_or.rst
+ paddle/logical_xor.rst
+ paddle/logsumexp.rst
+ paddle/manual_seed.rst
+ paddle/masked_select.rst
+ paddle/matmul.rst
+ paddle/max.rst
+ paddle/maximum.rst
+ paddle/mean.rst
+ paddle/meshgrid.rst
+ paddle/min.rst
+ paddle/minimum.rst
+ paddle/mm.rst
+ paddle/mul.rst
+ paddle/multiplex.rst
+ paddle/name_scope.rst
+ paddle/nonzero.rst
+ paddle/norm.rst
+ paddle/not_equal.rst
+ paddle/ones.rst
+ paddle/ones_like.rst
+ paddle/numel.rst
+ paddle/ParallelExecutor.rst
+ paddle/ParamAttr.rst
+ paddle/pow.rst
+ paddle/Print.rst
+ paddle/Program.rst
+ paddle/program_guard.rst
+ paddle/py_func.rst
+ paddle/rand.rst
+ paddle/randint.rst
+ paddle/randn.rst
+ paddle/randperm.rst
+ paddle/rank.rst
+ paddle/reciprocal.rst
+ paddle/reduce_all.rst
+ paddle/reduce_any.rst
+ paddle/reduce_max.rst
+ paddle/reduce_mean.rst
+ paddle/reduce_min.rst
+ paddle/reduce_prod.rst
+ paddle/reduce_sum.rst
+ paddle/reshape.rst
+ paddle/reverse.rst
+ paddle/roll.rst
+ paddle/round.rst
+ paddle/rsqrt.rst
+ paddle/save.rst
+ paddle/scale.rst
+ paddle/scatter.rst
+ paddle/scatter_nd.rst
+ paddle/scatter_nd_add.rst
+ paddle/scope_guard.rst
+ paddle/shape.rst
+ paddle/shard_index.rst
+ paddle/shuffle.rst
+ paddle/sign.rst
+ paddle/sin.rst
+ paddle/slice.rst
+ paddle/sort.rst
+ paddle/split.rst
+ paddle/sqrt.rst
+ paddle/square.rst
+ paddle/squeeze.rst
+ paddle/stack.rst
+ paddle/stanh.rst
+ paddle/std.rst
+ paddle/strided_slice.rst
+ paddle/sum.rst
+ paddle/sums.rst
+ paddle/t.rst
+ paddle/tanh.rst
+ paddle/topk.rst
+ paddle/trace.rst
+ paddle/transpose.rst
+ paddle/tril.rst
+ paddle/triu.rst
+ paddle/unbind.rst
+ paddle/unique.rst
+ paddle/unique_with_counts.rst
+ paddle/unsqueeze.rst
+ paddle/unstack.rst
+ paddle/var.rst
+ paddle/Variable.rst
+ paddle/WeightNormParamAttr.rst
+ paddle/where.rst
+ paddle/zeros.rst
+ paddle/zeros_like.rst
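Note: the new top-level `paddle.rst` is nothing but a `toctree` over one stub file per exported symbol, listed in case-insensitive alphabetical order (hence `CPUPlace` between `cos` and `create_global_var`). If the index ever had to be rebuilt from the stubs on disk, something along these lines would do it (a sketch under the assumption that the per-API `.rst` files already exist; it is not the repository's actual tooling):

    from pathlib import Path

    def build_paddle_index(api_dir: str = "doc/fluid/api/paddle",
                           out_file: str = "doc/fluid/api/paddle.rst") -> None:
        """Regenerate the toctree index from the per-API stub files."""
        names = sorted((p.name for p in Path(api_dir).glob("*.rst")),
                       key=str.lower)  # case-insensitive, as in this diff
        header = [
            "=" * 23,
            "paddle",
            "=" * 23,
            "",
            ".. toctree::",
            "    :maxdepth: 1",
            "",
        ]
        body = [f"    paddle/{name}" for name in names]
        Path(out_file).write_text("\n".join(header + body) + "\n")
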
diff --git a/doc/fluid/api/paddle/BuildStrategy.rst b/doc/fluid/api/paddle/BuildStrategy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..44536026e28abfae9b44cbee7e8b3d534eef468e
--- /dev/null
+++ b/doc/fluid/api/paddle/BuildStrategy.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_BuildStrategy:
+
+BuildStrategy
+-------------------------------
+:doc_source: paddle.fluid.compiler.BuildStrategy
+
+
diff --git a/doc/fluid/api/paddle/CPUPlace.rst b/doc/fluid/api/paddle/CPUPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b586e8f448782de776fca5a2501feb40b2f1748
--- /dev/null
+++ b/doc/fluid/api/paddle/CPUPlace.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_CPUPlace:
+
+CPUPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CPUPlace
+
+
diff --git a/doc/fluid/api/paddle/CUDAPinnedPlace.rst b/doc/fluid/api/paddle/CUDAPinnedPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2034660f2e7dd32a57319a3048446a63ab8fbd09
--- /dev/null
+++ b/doc/fluid/api/paddle/CUDAPinnedPlace.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_CUDAPinnedPlace:
+
+CUDAPinnedPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPinnedPlace
+
+
diff --git a/doc/fluid/api/paddle/CUDAPlace.rst b/doc/fluid/api/paddle/CUDAPlace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c0563bbe76c969f51379d25140a2a17bb7a4da08
--- /dev/null
+++ b/doc/fluid/api/paddle/CUDAPlace.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_CUDAPlace:
+
+CUDAPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPlace
+
+
diff --git a/doc/fluid/api/paddle/CompiledProgram.rst b/doc/fluid/api/paddle/CompiledProgram.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f317bf12c45b29bc28025220a41e27b6383de854
--- /dev/null
+++ b/doc/fluid/api/paddle/CompiledProgram.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_CompiledProgram:
+
+CompiledProgram
+-------------------------------
+:doc_source: paddle.fluid.compiler.CompiledProgram
+
+
diff --git a/doc/fluid/api/paddle/DataParallel.rst b/doc/fluid/api/paddle/DataParallel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c79fbe9b16051b0201bb575a84f2bdb397ba1b1
--- /dev/null
+++ b/doc/fluid/api/paddle/DataParallel.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_DataParallel:
+
+DataParallel
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.DataParallel
+
+
diff --git a/doc/fluid/api/paddle/ExecutionStrategy.rst b/doc/fluid/api/paddle/ExecutionStrategy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6df5ca375f2e26b5bd9d4fe999461c41be9ad315
--- /dev/null
+++ b/doc/fluid/api/paddle/ExecutionStrategy.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ExecutionStrategy:
+
+ExecutionStrategy
+-------------------------------
+:doc_source: paddle.fluid.ExecutionStrategy
+
+
diff --git a/doc/fluid/api/paddle/Executor.rst b/doc/fluid/api/paddle/Executor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9d47503fea22b5225c0deed4d51f1ca20bbd7f27
--- /dev/null
+++ b/doc/fluid/api/paddle/Executor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_Executor:
+
+Executor
+-------------------------------
+:doc_source: paddle.fluid.executor.Executor
+
+
diff --git a/doc/fluid/api/paddle/ParallelExecutor.rst b/doc/fluid/api/paddle/ParallelExecutor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..de5797d80ffefb50230c88d816e8fc1d8a188a7d
--- /dev/null
+++ b/doc/fluid/api/paddle/ParallelExecutor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ParallelExecutor:
+
+ParallelExecutor
+-------------------------------
+:doc_source: paddle.fluid.parallel_executor.ParallelExecutor
+
+
diff --git a/doc/fluid/api/paddle/ParamAttr.rst b/doc/fluid/api/paddle/ParamAttr.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ba9df2c1229b9e15c265343e02e17aaaf14efe2c
--- /dev/null
+++ b/doc/fluid/api/paddle/ParamAttr.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ParamAttr:
+
+ParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.ParamAttr
+
+
diff --git a/doc/fluid/api/paddle/Print.rst b/doc/fluid/api/paddle/Print.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5c78554268bf667f5bccb044218b59e1594da2c7
--- /dev/null
+++ b/doc/fluid/api/paddle/Print.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_Print:
+
+Print
+-------------------------------
+:doc_source: paddle.fluid.layers.control_flow.Print
+
+
diff --git a/doc/fluid/api/paddle/Program.rst b/doc/fluid/api/paddle/Program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2d96075437cfbe6c1702d13c84c71aac99800ab8
--- /dev/null
+++ b/doc/fluid/api/paddle/Program.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_Program:
+
+Program
+-------------------------------
+:doc_source: paddle.fluid.framework.Program
+
+
diff --git a/doc/fluid/api/paddle/Variable.rst b/doc/fluid/api/paddle/Variable.rst
new file mode 100644
index 0000000000000000000000000000000000000000..05275dce7eebd7637284769478121e50cc43dcde
--- /dev/null
+++ b/doc/fluid/api/paddle/Variable.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_Variable:
+
+Variable
+-------------------------------
+:doc_source: paddle.fluid.framework.Variable
+
+
diff --git a/doc/fluid/api/paddle/WeightNormParamAttr.rst b/doc/fluid/api/paddle/WeightNormParamAttr.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8e844883d6f6231d36c6c74f45ecf4df7b9a1fb
--- /dev/null
+++ b/doc/fluid/api/paddle/WeightNormParamAttr.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_WeightNormParamAttr:
+
+WeightNormParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.WeightNormParamAttr
+
+
diff --git a/doc/fluid/api/paddle/abs.rst b/doc/fluid/api/paddle/abs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01c1ea5aa6ccd3838b4892cba7097cdb3788c190
--- /dev/null
+++ b/doc/fluid/api/paddle/abs.rst
@@ -0,0 +1,5 @@
+.. _api_paddle_abs:
+
+abs
+-------------------------------
+:doc_source: paddle.fluid.layers.abs
diff --git a/doc/fluid/api/paddle/acos.rst b/doc/fluid/api/paddle/acos.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3abda379fa7d81b7e93f68e12626fb31a26b5d75
--- /dev/null
+++ b/doc/fluid/api/paddle/acos.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_acos:
+
+acos
+-------------------------------
+:doc_source: paddle.fluid.layers.acos
+
+
diff --git a/doc/fluid/api/paddle/add.rst b/doc/fluid/api/paddle/add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..60c1446a007b13d9b94d0a5ea35db78289ac4497
--- /dev/null
+++ b/doc/fluid/api/paddle/add.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_add:
+
+add
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_add
+
+
diff --git a/doc/fluid/api/paddle/addcmul.rst b/doc/fluid/api/paddle/addcmul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f6b547d99755172de97d4dfbde9c587bd7ad57b8
--- /dev/null
+++ b/doc/fluid/api/paddle/addcmul.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_addcmul:
+
+addcmul
+-------------------------------
+:doc_source: paddle.tensor.addcmul
+
+
diff --git a/doc/fluid/api/paddle/addmm.rst b/doc/fluid/api/paddle/addmm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..786c1b1f1134e8e38cb31d42884cc24b200c124a
--- /dev/null
+++ b/doc/fluid/api/paddle/addmm.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_addmm:
+
+addmm
+-------------------------------
+:doc_source: paddle.tensor.addmm
+
+
diff --git a/doc/fluid/api/paddle/allclose.rst b/doc/fluid/api/paddle/allclose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..96041b974bd345d8838496f32c120ddf8346a3da
--- /dev/null
+++ b/doc/fluid/api/paddle/allclose.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_allclose:
+
+allclose
+-------------------------------
+:doc_source: paddle.tensor.allclose
+
+
diff --git a/doc/fluid/api/paddle/append_backward.rst b/doc/fluid/api/paddle/append_backward.rst
new file mode 100644
index 0000000000000000000000000000000000000000..608533d3c2b8efc7f71f7f1fe59be75f375100f3
--- /dev/null
+++ b/doc/fluid/api/paddle/append_backward.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_append_backward:
+
+append_backward
+-------------------------------
+:doc_source: paddle.fluid.backward.append_backward
+
+
diff --git a/doc/fluid/api/paddle/arange.rst b/doc/fluid/api/paddle/arange.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c5d29a89c506bb01b8e4ff6b88899f7938b02f5
--- /dev/null
+++ b/doc/fluid/api/paddle/arange.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_arange:
+
+arange
+-------------------------------
+:doc_source: paddle.fluid.layers.range
+
+
diff --git a/doc/fluid/api/paddle/argmax.rst b/doc/fluid/api/paddle/argmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc1665f9f199d4b288686710fa60d25eadd075c3
--- /dev/null
+++ b/doc/fluid/api/paddle/argmax.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_paddle_argmax:
+
+argmax
+------
+
+.. autofunction:: paddle.tensor.search.argmax
+ :noindex:
+
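Note: two kinds of stub live under `doc/fluid/api/paddle/`: pages such as `argmax.rst` (above) and `argmin.rst` (next) that pull the docstring in directly with an `autofunction` or `autoclass` directive, and thin alias pages such as `abs.rst` whose whole body is a `:doc_source:` line pointing back at the `paddle.fluid` or `paddle.tensor` implementation. For the autodoc flavor, the right directive can be derived from the target object itself; the helper below is an illustrative assumption, not the project's generator:

    import importlib
    import inspect

    def sphinx_directive(qualified_name: str) -> str:
        """Return 'autoclass' for classes, 'autofunction' otherwise,
        e.g. sphinx_directive('paddle.tensor.search.argmax') == 'autofunction'."""
        module_path, _, attr = qualified_name.rpartition(".")
        obj = getattr(importlib.import_module(module_path), attr)
        return "autoclass" if inspect.isclass(obj) else "autofunction"
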
diff --git a/doc/fluid/api/paddle/argmin.rst b/doc/fluid/api/paddle/argmin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..18c2334659a92457e4be4f739d94a5f8c633442e
--- /dev/null
+++ b/doc/fluid/api/paddle/argmin.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_paddle_argmin:
+
+argmin
+------
+
+.. autofunction:: paddle.tensor.search.argmin
+ :noindex:
+
+
diff --git a/doc/fluid/api/paddle/argsort.rst b/doc/fluid/api/paddle/argsort.rst
new file mode 100644
index 0000000000000000000000000000000000000000..716f7e79312bcc0f83abff33bf3684b6a6b68500
--- /dev/null
+++ b/doc/fluid/api/paddle/argsort.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_argsort:
+
+argsort
+-------------------------------
+:doc_source: paddle.tensor.argsort
+
+
diff --git a/doc/fluid/api/paddle/asin.rst b/doc/fluid/api/paddle/asin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4ceb1076217e6f2b09e7103f61414be334dcc12b
--- /dev/null
+++ b/doc/fluid/api/paddle/asin.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_asin:
+
+asin
+-------------------------------
+:doc_source: paddle.fluid.layers.asin
+
+
diff --git a/doc/fluid/api/paddle/atan.rst b/doc/fluid/api/paddle/atan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a71ae54d75bf5393bb2565267678c47da90f7a2
--- /dev/null
+++ b/doc/fluid/api/paddle/atan.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_atan:
+
+atan
+-------------------------------
+:doc_source: paddle.fluid.layers.atan
+
+
diff --git a/doc/fluid/api/paddle/bmm.rst b/doc/fluid/api/paddle/bmm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1298fe18888d97ab9c32bad2a17fdef86ba6c5b6
--- /dev/null
+++ b/doc/fluid/api/paddle/bmm.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_bmm:
+
+bmm
+-------------------------------
+:doc_source: paddle.tensor.bmm
+
+
diff --git a/doc/fluid/api/paddle/cast.rst b/doc/fluid/api/paddle/cast.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb19a606c852b52651b97d2c45e8c948532012ba
--- /dev/null
+++ b/doc/fluid/api/paddle/cast.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_cast:
+
+cast
+-------------------------------
+:doc_source: paddle.fluid.layers.cast
+
+
diff --git a/doc/fluid/api/paddle/ceil.rst b/doc/fluid/api/paddle/ceil.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97e25c0a6952a54b2b81c3f64eb9be405c98903f
--- /dev/null
+++ b/doc/fluid/api/paddle/ceil.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ceil:
+
+ceil
+-------------------------------
+:doc_source: paddle.fluid.layers.ceil
+
+
diff --git a/doc/fluid/api/paddle/cholesky.rst b/doc/fluid/api/paddle/cholesky.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23447546614321c65256fc09bff6e2b00603ed24
--- /dev/null
+++ b/doc/fluid/api/paddle/cholesky.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_cholesky:
+
+cholesky
+-------------------------------
+:doc_source: paddle.tensor.cholesky
+
+
diff --git a/doc/fluid/api/paddle/clamp.rst b/doc/fluid/api/paddle/clamp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e56b4d1017c4c5a67dafb39f1e3dac2ad03d5c46
--- /dev/null
+++ b/doc/fluid/api/paddle/clamp.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_clamp:
+
+clamp
+-------------------------------
+:doc_source: paddle.tensor.clamp
+
+
diff --git a/doc/fluid/api/paddle/concat.rst b/doc/fluid/api/paddle/concat.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4d26f1b3b9486c23e9e8fa94d85044e9f388da46
--- /dev/null
+++ b/doc/fluid/api/paddle/concat.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_concat:
+
+concat
+-------------------------------
+:doc_source: paddle.fluid.layers.concat
+
+
diff --git a/doc/fluid/api/paddle/cos.rst b/doc/fluid/api/paddle/cos.rst
new file mode 100644
index 0000000000000000000000000000000000000000..180c5cb0088c8fa407181f2c7b71535832191980
--- /dev/null
+++ b/doc/fluid/api/paddle/cos.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_cos:
+
+cos
+-------------------------------
+:doc_source: paddle.fluid.layers.cos
+
+
diff --git a/doc/fluid/api/paddle/create_global_var.rst b/doc/fluid/api/paddle/create_global_var.rst
new file mode 100644
index 0000000000000000000000000000000000000000..05ae31708a02d7f3d8d0b98afb4f96c20aabc48d
--- /dev/null
+++ b/doc/fluid/api/paddle/create_global_var.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_create_global_var:
+
+create_global_var
+-------------------------------
+:doc_source: paddle.fluid.layers.tensor.create_global_var
+
+
diff --git a/doc/fluid/api/paddle/create_parameter.rst b/doc/fluid/api/paddle/create_parameter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..daa33b854a6bcde40a7791ea1c8953ad2c9f2f32
--- /dev/null
+++ b/doc/fluid/api/paddle/create_parameter.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_create_parameter:
+
+create_parameter
+-------------------------------
+:doc_source: paddle.fluid.layers.create_parameter
+
+
diff --git a/doc/fluid/api/paddle/create_tensor.rst b/doc/fluid/api/paddle/create_tensor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3ad8cd6aa5fc3af1118e7ec10eba7d5e466fbc25
--- /dev/null
+++ b/doc/fluid/api/paddle/create_tensor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_create_tensor:
+
+create_tensor
+-------------------------------
+:doc_source: paddle.fluid.layers.create_tensor
+
+
diff --git a/doc/fluid/api/paddle/crop_tensor.rst b/doc/fluid/api/paddle/crop_tensor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c36fb311aa295b2e8e71f250ec14daadd00e9d7a
--- /dev/null
+++ b/doc/fluid/api/paddle/crop_tensor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_crop_tensor:
+
+crop_tensor
+-------------------------------
+:doc_source: paddle.fluid.layers.crop_tensor
+
+
diff --git a/doc/fluid/api/paddle/cross.rst b/doc/fluid/api/paddle/cross.rst
new file mode 100644
index 0000000000000000000000000000000000000000..50c15588babf215c9e0204f571cbda73d28aed03
--- /dev/null
+++ b/doc/fluid/api/paddle/cross.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_cross:
+
+cross
+-------------------------------
+:doc_source: paddle.tensor.cross
+
+
diff --git a/doc/fluid/api/paddle/cumsum.rst b/doc/fluid/api/paddle/cumsum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..673296e8836d1116f16d65b73a4f781241538dd4
--- /dev/null
+++ b/doc/fluid/api/paddle/cumsum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_cumsum:
+
+cumsum
+-------------------------------
+:doc_source: paddle.tensor.cumsum
+
+
diff --git a/doc/fluid/api/paddle/default_main_program.rst b/doc/fluid/api/paddle/default_main_program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8f92ac2e19b9920a4c8642ba5e945be0f9f86235
--- /dev/null
+++ b/doc/fluid/api/paddle/default_main_program.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_default_main_program:
+
+default_main_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_main_program
+
+
diff --git a/doc/fluid/api/paddle/default_startup_program.rst b/doc/fluid/api/paddle/default_startup_program.rst
new file mode 100644
index 0000000000000000000000000000000000000000..78b95f4408d62552f111711cf187df890c182cda
--- /dev/null
+++ b/doc/fluid/api/paddle/default_startup_program.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_default_startup_program:
+
+default_startup_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_startup_program
+
+
diff --git a/doc/fluid/api/paddle/diag.rst b/doc/fluid/api/paddle/diag.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b1552f5c21e3eed6c6862088f0b61a3ab06586e
--- /dev/null
+++ b/doc/fluid/api/paddle/diag.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_diag:
+
+diag
+-------------------------------
+:doc_source: paddle.fluid.layers.diag
+
+
diff --git a/doc/fluid/api/paddle/disable_imperative.rst b/doc/fluid/api/paddle/disable_imperative.rst
new file mode 100644
index 0000000000000000000000000000000000000000..22ec4f3ad822bdea9c4869985ac1dcd47a9b6fc4
--- /dev/null
+++ b/doc/fluid/api/paddle/disable_imperative.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_disable_imperative:
+
+disable_imperative
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.disable_dygraph
+
+
diff --git a/doc/fluid/api/paddle/dist.rst b/doc/fluid/api/paddle/dist.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca02ef267569a2888c2848446299517f8584fdef
--- /dev/null
+++ b/doc/fluid/api/paddle/dist.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_dist:
+
+dist
+-------------------------------
+:doc_source: paddle.tensor.dist
+
+
diff --git a/doc/fluid/api/paddle/distribution.rst b/doc/fluid/api/paddle/distribution.rst
new file mode 100644
index 0000000000000000000000000000000000000000..76ee9f2eeb20955bd7ed7824c75d1d28a1856272
--- /dev/null
+++ b/doc/fluid/api/paddle/distribution.rst
@@ -0,0 +1,10 @@
+============
+distribution
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ distribution/Distribution.rst
+ distribution/Normal.rst
+ distribution/Uniform.rst
diff --git a/doc/fluid/api/paddle/distribution/Distribution.rst b/doc/fluid/api/paddle/distribution/Distribution.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dbfe5082f27b75fd88efb77c2f89d4991b30c723
--- /dev/null
+++ b/doc/fluid/api/paddle/distribution/Distribution.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distribution_Distribution:
+
+Distribution
+------------
+
+.. autoclass:: paddle.distribution.Distribution
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/paddle/distribution/Normal.rst b/doc/fluid/api/paddle/distribution/Normal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f0f972fd7e561d9200fb566bab4f2019100efcbd
--- /dev/null
+++ b/doc/fluid/api/paddle/distribution/Normal.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distribution_Normal:
+
+Normal
+------
+
+.. autoclass:: paddle.distribution.Normal
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/paddle/distribution/Uniform.rst b/doc/fluid/api/paddle/distribution/Uniform.rst
new file mode 100644
index 0000000000000000000000000000000000000000..81b5d8dcdfcee360b680958a4c5005dfcca773ac
--- /dev/null
+++ b/doc/fluid/api/paddle/distribution/Uniform.rst
@@ -0,0 +1,13 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_distribution_Uniform:
+
+Uniform
+-------
+
+.. autoclass:: paddle.distribution.Uniform
+ :members:
+ :inherited-members:
+ :noindex:
+
diff --git a/doc/fluid/api/paddle/div.rst b/doc/fluid/api/paddle/div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..afb945f5c0a37f62568c788f76ec699b5befeed1
--- /dev/null
+++ b/doc/fluid/api/paddle/div.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_div:
+
+div
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_div
+
+
diff --git a/doc/fluid/api/paddle/dot.rst b/doc/fluid/api/paddle/dot.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b912365663b14779389246749586e64a98bc5575
--- /dev/null
+++ b/doc/fluid/api/paddle/dot.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_dot:
+
+dot
+-------------------------------
+:doc_source: paddle.tensor.dot
+
+
diff --git a/doc/fluid/api/paddle/elementwise_add.rst b/doc/fluid/api/paddle/elementwise_add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..196d8bade7c6b719ee54b4983a911af2c2c3733f
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_add.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_add:
+
+elementwise_add
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_add
+
+
diff --git a/doc/fluid/api/paddle/elementwise_div.rst b/doc/fluid/api/paddle/elementwise_div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c2f87134582aea7aee656b3fe416da281dc08d6c
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_div.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_div:
+
+elementwise_div
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_div
+
+
diff --git a/doc/fluid/api/paddle/elementwise_floordiv.rst b/doc/fluid/api/paddle/elementwise_floordiv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..695ac3f22e5f4abcfad7c7822950e98048cbda2b
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_floordiv.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_floordiv:
+
+elementwise_floordiv
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_floordiv
+
+
diff --git a/doc/fluid/api/paddle/elementwise_mod.rst b/doc/fluid/api/paddle/elementwise_mod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5f0416079d9a4aa298ef82051b080c0921b25dae
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_mod.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_mod:
+
+elementwise_mod
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_mod
+
+
diff --git a/doc/fluid/api/paddle/elementwise_mul.rst b/doc/fluid/api/paddle/elementwise_mul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d049d9d5067321d9ea0c0a07e5871cf66dd5167
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_mul.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_mul:
+
+elementwise_mul
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_mul
+
+
diff --git a/doc/fluid/api/paddle/elementwise_pow.rst b/doc/fluid/api/paddle/elementwise_pow.rst
new file mode 100644
index 0000000000000000000000000000000000000000..742a3a4fbcb64ca09e69c41953b87093bcd07cb5
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_pow.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_pow:
+
+elementwise_pow
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_pow
+
+
diff --git a/doc/fluid/api/paddle/elementwise_sub.rst b/doc/fluid/api/paddle/elementwise_sub.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3aca499fbdf0cfa5956f7f6174aa96b24eb400ea
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_sub.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_sub:
+
+elementwise_sub
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_sub
+
+
diff --git a/doc/fluid/api/paddle/elementwise_sum.rst b/doc/fluid/api/paddle/elementwise_sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a135aca000efb8fd333d879487b19319ed555db
--- /dev/null
+++ b/doc/fluid/api/paddle/elementwise_sum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_elementwise_sum:
+
+elementwise_sum
+-------------------------------
+:doc_source: paddle.tensor.elementwise_sum
+
+
diff --git a/doc/fluid/api/paddle/enable_imperative.rst b/doc/fluid/api/paddle/enable_imperative.rst
new file mode 100644
index 0000000000000000000000000000000000000000..81b39bb938baeed31df3cca6a656e8f34be5137c
--- /dev/null
+++ b/doc/fluid/api/paddle/enable_imperative.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_enable_imperative:
+
+enable_imperative
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.enable_dygraph
+
+
diff --git a/doc/fluid/api/paddle/equal.rst b/doc/fluid/api/paddle/equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5fa7398800f6c9ecd5692f2e0f1ef55e829016e4
--- /dev/null
+++ b/doc/fluid/api/paddle/equal.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_equal:
+
+equal
+-------------------------------
+:doc_source: paddle.tensor.equal
+
+
diff --git a/doc/fluid/api/paddle/equal_all.rst b/doc/fluid/api/paddle/equal_all.rst
new file mode 100644
index 0000000000000000000000000000000000000000..58fc331acc2b3f564dc73bb8c039c17b9b4720f2
--- /dev/null
+++ b/doc/fluid/api/paddle/equal_all.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_equal_all:
+
+equal_all
+-------------------------------
+:doc_source: paddle.tensor.equal_all
+
+
diff --git a/doc/fluid/api/paddle/erf.rst b/doc/fluid/api/paddle/erf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9f8736d4dbf93e7010dd4bd8fb8f08ee072f0d6b
--- /dev/null
+++ b/doc/fluid/api/paddle/erf.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_erf:
+
+erf
+-------------------------------
+:doc_source: paddle.fluid.layers.erf
+
+
diff --git a/doc/fluid/api/paddle/exp.rst b/doc/fluid/api/paddle/exp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..468fafefc0edfb861ca224dbae6ecfec3e14f902
--- /dev/null
+++ b/doc/fluid/api/paddle/exp.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_exp:
+
+exp
+-------------------------------
+:doc_source: paddle.fluid.layers.exp
+
+
diff --git a/doc/fluid/api/paddle/expand.rst b/doc/fluid/api/paddle/expand.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4dae2306b0140dbeaad89a4e7dc3cdae91a6b5b0
--- /dev/null
+++ b/doc/fluid/api/paddle/expand.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_expand:
+
+expand
+-------------------------------
+:doc_source: paddle.fluid.layers.expand
+
+
diff --git a/doc/fluid/api/paddle/expand_as.rst b/doc/fluid/api/paddle/expand_as.rst
new file mode 100644
index 0000000000000000000000000000000000000000..02c7a2debbd939182be8bc5083c3a14573231623
--- /dev/null
+++ b/doc/fluid/api/paddle/expand_as.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_expand_as:
+
+expand_as
+-------------------------------
+:doc_source: paddle.fluid.layers.expand_as
+
+
diff --git a/doc/fluid/api/paddle/eye.rst b/doc/fluid/api/paddle/eye.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66a8522df17f7a4d79a04e37361afe05828fef77
--- /dev/null
+++ b/doc/fluid/api/paddle/eye.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_eye:
+
+eye
+-------------------------------
+:doc_source: paddle.fluid.layers.eye
+
+
diff --git a/doc/fluid/api/paddle/fill_constant.rst b/doc/fluid/api/paddle/fill_constant.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0ba3d4267f972bad2d65d8cba001a0c74a5559ef
--- /dev/null
+++ b/doc/fluid/api/paddle/fill_constant.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_fill_constant:
+
+fill_constant
+-------------------------------
+:doc_source: paddle.fluid.layers.fill_constant
+
+
diff --git a/doc/fluid/api/paddle/flatten.rst b/doc/fluid/api/paddle/flatten.rst
new file mode 100644
index 0000000000000000000000000000000000000000..91d560427c2dbda9823980ce7ae9e8fa5b13eb5a
--- /dev/null
+++ b/doc/fluid/api/paddle/flatten.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_flatten:
+
+flatten
+-------------------------------
+:doc_source: paddle.fluid.layers.flatten
+
+
diff --git a/doc/fluid/api/paddle/flip.rst b/doc/fluid/api/paddle/flip.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e251b5b75d2ddad2897243a165c4dbeab76916d3
--- /dev/null
+++ b/doc/fluid/api/paddle/flip.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_flip:
+
+flip
+-------------------------------
+:doc_source: paddle.tensor.flip
+
+
diff --git a/doc/fluid/api/paddle/floor.rst b/doc/fluid/api/paddle/floor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dc9940a76718dcff3b1ea520c150aa6a4c80481c
--- /dev/null
+++ b/doc/fluid/api/paddle/floor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_floor:
+
+floor
+-------------------------------
+:doc_source: paddle.fluid.layers.floor
+
+
diff --git a/doc/fluid/api/paddle/full.rst b/doc/fluid/api/paddle/full.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5a6869012d23b48100b1f9e4bc11c515bd8dad66
--- /dev/null
+++ b/doc/fluid/api/paddle/full.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_full:
+
+full
+-------------------------------
+:doc_source: paddle.fluid.layers.fill_constant
+
+
diff --git a/doc/fluid/api/paddle/full_like.rst b/doc/fluid/api/paddle/full_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7262c2c892c854937ea4ac8b79e2269765ceb580
--- /dev/null
+++ b/doc/fluid/api/paddle/full_like.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_full_like:
+
+full_like
+-------------------------------
+:doc_source: paddle.tensor.full_like
+
+
diff --git a/doc/fluid/api/paddle/gather.rst b/doc/fluid/api/paddle/gather.rst
new file mode 100644
index 0000000000000000000000000000000000000000..954bde40d2fa4303a7febca28e91d001e312b57f
--- /dev/null
+++ b/doc/fluid/api/paddle/gather.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_gather:
+
+gather
+-------------------------------
+:doc_source: paddle.fluid.layers.gather
+
+
diff --git a/doc/fluid/api/paddle/gather_nd.rst b/doc/fluid/api/paddle/gather_nd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d00261c2cc7068f818665cab53edc54f18404cf0
--- /dev/null
+++ b/doc/fluid/api/paddle/gather_nd.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_gather_nd:
+
+gather_nd
+-------------------------------
+:doc_source: paddle.fluid.layers.gather_nd
+
+
diff --git a/doc/fluid/api/paddle/global_scope.rst b/doc/fluid/api/paddle/global_scope.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb2babc1c492f7cfcd89e409e5d76e0d709e4ec3
--- /dev/null
+++ b/doc/fluid/api/paddle/global_scope.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_global_scope:
+
+global_scope
+-------------------------------
+:doc_source: paddle.fluid.executor.global_scope
+
+
diff --git a/doc/fluid/api/paddle/gradients.rst b/doc/fluid/api/paddle/gradients.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9a65a55b6bb3d9da580696c3be355a6fba29db25
--- /dev/null
+++ b/doc/fluid/api/paddle/gradients.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_gradients:
+
+gradients
+-------------------------------
+:doc_source: paddle.fluid.backward.gradients
+
+
diff --git a/doc/fluid/api/paddle/greater_equal.rst b/doc/fluid/api/paddle/greater_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..54afe57ffab5185fc2c3fb92a671e0b726108ab3
--- /dev/null
+++ b/doc/fluid/api/paddle/greater_equal.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_greater_equal:
+
+greater_equal
+-------------------------------
+:doc_source: paddle.tensor.greater_equal
+
+
diff --git a/doc/fluid/api/paddle/greater_than.rst b/doc/fluid/api/paddle/greater_than.rst
new file mode 100644
index 0000000000000000000000000000000000000000..04a874dd929d7dae274898c87029059b1b1d6261
--- /dev/null
+++ b/doc/fluid/api/paddle/greater_than.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_greater_than:
+
+greater_than
+-------------------------------
+:doc_source: paddle.tensor.greater_than
+
+
diff --git a/doc/fluid/api/paddle/has_inf.rst b/doc/fluid/api/paddle/has_inf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1efcab91e60a1fd229a066dba95d21f81e513c94
--- /dev/null
+++ b/doc/fluid/api/paddle/has_inf.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_has_inf:
+
+has_inf
+-------------------------------
+:doc_source: paddle.fluid.layers.has_inf
+
+
diff --git a/doc/fluid/api/paddle/has_nan.rst b/doc/fluid/api/paddle/has_nan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..59710b0c09a0dce28b1dfaba576f47886f706ee0
--- /dev/null
+++ b/doc/fluid/api/paddle/has_nan.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_has_nan:
+
+has_nan
+-------------------------------
+:doc_source: paddle.fluid.layers.has_nan
+
+
diff --git a/doc/fluid/api/paddle/in_imperative_mode.rst b/doc/fluid/api/paddle/in_imperative_mode.rst
new file mode 100644
index 0000000000000000000000000000000000000000..29a1e5ca5ac7d5e7de5475f90f96772968bca40d
--- /dev/null
+++ b/doc/fluid/api/paddle/in_imperative_mode.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_in_imperative_mode:
+
+in_imperative_mode
+-------------------------------
+:doc_source: paddle.fluid.framework.in_dygraph_mode
+
+
diff --git a/doc/fluid/api/paddle/increment.rst b/doc/fluid/api/paddle/increment.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c63a451ddacd5a51fb9699445bd1bb0071ab331c
--- /dev/null
+++ b/doc/fluid/api/paddle/increment.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_increment:
+
+increment
+-------------------------------
+:doc_source: paddle.fluid.layers.increment
+
+
diff --git a/doc/fluid/api/paddle/index_sample.rst b/doc/fluid/api/paddle/index_sample.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3a1f07978d8e112be98aa6aabb0140ab4f8d0208
--- /dev/null
+++ b/doc/fluid/api/paddle/index_sample.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_index_sample:
+
+index_sample
+-------------------------------
+:doc_source: paddle.tensor.index_sample
+
+
diff --git a/doc/fluid/api/paddle/index_select.rst b/doc/fluid/api/paddle/index_select.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c5486cd2375bd0d7f0baba01c1829c10f66ab45c
--- /dev/null
+++ b/doc/fluid/api/paddle/index_select.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_index_select:
+
+index_select
+-------------------------------
+:doc_source: paddle.tensor.index_select
+
+
diff --git a/doc/fluid/api/paddle/inverse.rst b/doc/fluid/api/paddle/inverse.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8661db22117b638624e16d6a31bcf4eef321a6b4
--- /dev/null
+++ b/doc/fluid/api/paddle/inverse.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_inverse:
+
+inverse
+-------------------------------
+:doc_source: paddle.tensor.inverse
+
+
diff --git a/doc/fluid/api/paddle/is_empty.rst b/doc/fluid/api/paddle/is_empty.rst
new file mode 100644
index 0000000000000000000000000000000000000000..51948a3d0d84391e8d63e176ddaabba026551f98
--- /dev/null
+++ b/doc/fluid/api/paddle/is_empty.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_is_empty:
+
+is_empty
+-------------------------------
+:doc_source: paddle.fluid.layers.is_empty
+
+
diff --git a/doc/fluid/api/paddle/isfinite.rst b/doc/fluid/api/paddle/isfinite.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8e4aa8db89b64be36f6e75ec20363fae90a21dbe
--- /dev/null
+++ b/doc/fluid/api/paddle/isfinite.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_isfinite:
+
+isfinite
+-------------------------------
+:doc_source: paddle.fluid.layers.isfinite
+
+
diff --git a/doc/fluid/api/paddle/kron.rst b/doc/fluid/api/paddle/kron.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8768fd230672dbbecb06ce955ac57d3f1cb37f3f
--- /dev/null
+++ b/doc/fluid/api/paddle/kron.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_kron:
+
+kron
+-------------------------------
+:doc_source: paddle.tensor.kron
+
+
diff --git a/doc/fluid/api/paddle/less_equal.rst b/doc/fluid/api/paddle/less_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3fc5e2ce2b819dfed7ca8b64841836229c86d3e4
--- /dev/null
+++ b/doc/fluid/api/paddle/less_equal.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_less_equal:
+
+less_equal
+-------------------------------
+:doc_source: paddle.tensor.less_equal
+
+
diff --git a/doc/fluid/api/paddle/less_than.rst b/doc/fluid/api/paddle/less_than.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7df6eb441d37a2fe8bf95e43a48df8471115ad2c
--- /dev/null
+++ b/doc/fluid/api/paddle/less_than.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_less_than:
+
+less_than
+-------------------------------
+:doc_source: paddle.tensor.less_than
+
+
diff --git a/doc/fluid/api/paddle/linspace.rst b/doc/fluid/api/paddle/linspace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..268cdf16f5018d8bc4ba840306f710d6f9d6aedb
--- /dev/null
+++ b/doc/fluid/api/paddle/linspace.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_linspace:
+
+linspace
+-------------------------------
+:doc_source: paddle.fluid.layers.linspace
+
+
diff --git a/doc/fluid/api/paddle/load.rst b/doc/fluid/api/paddle/load.rst
new file mode 100644
index 0000000000000000000000000000000000000000..953efbe850dd244fe021e6444cb0f6ef77bd8184
--- /dev/null
+++ b/doc/fluid/api/paddle/load.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.io.load
+
+
diff --git a/doc/fluid/api/paddle/log.rst b/doc/fluid/api/paddle/log.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66856a3b9f8cb83e0cf38e0f55bb83377b0f7412
--- /dev/null
+++ b/doc/fluid/api/paddle/log.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_log:
+
+log
+-------------------------------
+:doc_source: paddle.fluid.layers.log
+
+
diff --git a/doc/fluid/api/paddle/log1p.rst b/doc/fluid/api/paddle/log1p.rst
new file mode 100644
index 0000000000000000000000000000000000000000..543798763a5cb43bdf28cc01810c502cf5d936b4
--- /dev/null
+++ b/doc/fluid/api/paddle/log1p.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_log1p:
+
+log1p
+-------------------------------
+:doc_source: paddle.tensor.log1p
+
+
diff --git a/doc/fluid/api/paddle/logical_and.rst b/doc/fluid/api/paddle/logical_and.rst
new file mode 100644
index 0000000000000000000000000000000000000000..887ea10780f817060fd3b0ffc5d605aef2196408
--- /dev/null
+++ b/doc/fluid/api/paddle/logical_and.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_logical_and:
+
+logical_and
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_and
+
+
diff --git a/doc/fluid/api/paddle/logical_not.rst b/doc/fluid/api/paddle/logical_not.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b6d74c4b31401ac234b70611eca4f552a6d1404
--- /dev/null
+++ b/doc/fluid/api/paddle/logical_not.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_logical_not:
+
+logical_not
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_not
+
+
diff --git a/doc/fluid/api/paddle/logical_or.rst b/doc/fluid/api/paddle/logical_or.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01cb12f8f7216abd09ab1c2d40ae759a86992dd1
--- /dev/null
+++ b/doc/fluid/api/paddle/logical_or.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_logical_or:
+
+logical_or
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_or
+
+
diff --git a/doc/fluid/api/paddle/logical_xor.rst b/doc/fluid/api/paddle/logical_xor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cde495c6661418167f4ffa5e0afa32dcd8558619
--- /dev/null
+++ b/doc/fluid/api/paddle/logical_xor.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_logical_xor:
+
+logical_xor
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_xor
+
+
diff --git a/doc/fluid/api/paddle/logsumexp.rst b/doc/fluid/api/paddle/logsumexp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..129cb91268ddcacc6c51e69bdfe7e15bebd4554c
--- /dev/null
+++ b/doc/fluid/api/paddle/logsumexp.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_logsumexp:
+
+logsumexp
+-------------------------------
+:doc_source: paddle.tensor.logsumexp
+
+
diff --git a/doc/fluid/api/paddle/manual_seed.rst b/doc/fluid/api/paddle/manual_seed.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7be7452dabf1c72502e76d38e1cdb047beebe2e8
--- /dev/null
+++ b/doc/fluid/api/paddle/manual_seed.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_manual_seed:
+
+manual_seed
+-------------------------------
+:doc_source: paddle.framework.manual_seed
+
+
diff --git a/doc/fluid/api/paddle/matmul.rst b/doc/fluid/api/paddle/matmul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..27235e5599a23802e61014e3096a1fcb733cd474
--- /dev/null
+++ b/doc/fluid/api/paddle/matmul.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_matmul:
+
+matmul
+-------------------------------
+:doc_source: paddle.tensor.matmul
+
+
diff --git a/doc/fluid/api/paddle/max.rst b/doc/fluid/api/paddle/max.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0d28148a8dcc0ac31744450c954e9a125e475add
--- /dev/null
+++ b/doc/fluid/api/paddle/max.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_max:
+
+max
+-------------------------------
+:doc_source: paddle.tensor.max
+
+
diff --git a/doc/fluid/api/paddle/maximum.rst b/doc/fluid/api/paddle/maximum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c85f8a97710efb559e5f73c586eb45798224e8db
--- /dev/null
+++ b/doc/fluid/api/paddle/maximum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_maximum:
+
+maximum
+-------------------------------
+:doc_source: paddle.tensor.maximum
+
+
diff --git a/doc/fluid/api/paddle/mean.rst b/doc/fluid/api/paddle/mean.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0af4250a201baa96f7fa8b117790aa4efbcaf121
--- /dev/null
+++ b/doc/fluid/api/paddle/mean.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_mean:
+
+mean
+-------------------------------
+:doc_source: paddle.fluid.layers.mean
+
+
diff --git a/doc/fluid/api/paddle/meshgrid.rst b/doc/fluid/api/paddle/meshgrid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08bbe433dea3b987748a1aa0a349b7a97a026bf0
--- /dev/null
+++ b/doc/fluid/api/paddle/meshgrid.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_meshgrid:
+
+meshgrid
+-------------------------------
+:doc_source: paddle.tensor.meshgrid
+
+
diff --git a/doc/fluid/api/paddle/min.rst b/doc/fluid/api/paddle/min.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb99109471c0ab684fdd7646fc446abe8aafe6cb
--- /dev/null
+++ b/doc/fluid/api/paddle/min.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_min:
+
+min
+-------------------------------
+:doc_source: paddle.tensor.min
+
+
diff --git a/doc/fluid/api/paddle/minimum.rst b/doc/fluid/api/paddle/minimum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..41391741da78620231f5fe1a9c5ee3ea73ce70be
--- /dev/null
+++ b/doc/fluid/api/paddle/minimum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_minimum:
+
+minimum
+-------------------------------
+:doc_source: paddle.tensor.minimum
+
+
diff --git a/doc/fluid/api/paddle/mm.rst b/doc/fluid/api/paddle/mm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..50650110b6a61ea0eac2575460121e260ce3d85b
--- /dev/null
+++ b/doc/fluid/api/paddle/mm.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_mm:
+
+mm
+-------------------------------
+:doc_source: paddle.fluid.layers.matmul
+
+
diff --git a/doc/fluid/api/paddle/mul.rst b/doc/fluid/api/paddle/mul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7cf24d0dc276736e652afc643237aff41bd3d119
--- /dev/null
+++ b/doc/fluid/api/paddle/mul.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_mul:
+
+mul
+-------------------------------
+:doc_source: paddle.fluid.layers.mul
+
+
diff --git a/doc/fluid/api/paddle/multiplex.rst b/doc/fluid/api/paddle/multiplex.rst
new file mode 100644
index 0000000000000000000000000000000000000000..de115db709418f9fda0d586495f2ebc5ed7de537
--- /dev/null
+++ b/doc/fluid/api/paddle/multiplex.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_multiplex:
+
+multiplex
+-------------------------------
+:doc_source: paddle.fluid.layers.multiplex
+
+
diff --git a/doc/fluid/api/paddle/name_scope.rst b/doc/fluid/api/paddle/name_scope.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7eb875efc0bac9bac54cf63594031c2168843eab
--- /dev/null
+++ b/doc/fluid/api/paddle/name_scope.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_name_scope:
+
+name_scope
+-------------------------------
+:doc_source: paddle.fluid.framework.name_scope
+
+
diff --git a/doc/fluid/api/paddle/nonzero.rst b/doc/fluid/api/paddle/nonzero.rst
new file mode 100644
index 0000000000000000000000000000000000000000..be55e77dece3e727509e1598c0ae384c1f26d4a4
--- /dev/null
+++ b/doc/fluid/api/paddle/nonzero.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_nonzero:
+
+nonzero
+-------------------------------
+:doc_source: paddle.tensor.nonzero
+
+
diff --git a/doc/fluid/api/paddle/norm.rst b/doc/fluid/api/paddle/norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7bff0a32baff3499cb8eff85d30282d750b9b25b
--- /dev/null
+++ b/doc/fluid/api/paddle/norm.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_norm:
+
+norm
+-------------------------------
+:doc_source: paddle.tensor.norm
+
+
diff --git a/doc/fluid/api/paddle/not_equal.rst b/doc/fluid/api/paddle/not_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4fd1cbe809d9dded938f2014124ee9b738b1d9cd
--- /dev/null
+++ b/doc/fluid/api/paddle/not_equal.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_not_equal:
+
+not_equal
+-------------------------------
+:doc_source: paddle.tensor.not_equal
+
+
diff --git a/doc/fluid/api/paddle/ones.rst b/doc/fluid/api/paddle/ones.rst
new file mode 100644
index 0000000000000000000000000000000000000000..557fa15d0958b68b14bca8899e8ad91d473c1dc3
--- /dev/null
+++ b/doc/fluid/api/paddle/ones.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ones:
+
+ones
+-------------------------------
+:doc_source: paddle.fluid.layers.ones
+
+
diff --git a/doc/fluid/api/paddle/ones_like.rst b/doc/fluid/api/paddle/ones_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2365b0dfdddfedcd5292e08588e31184fa52342e
--- /dev/null
+++ b/doc/fluid/api/paddle/ones_like.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_ones_like:
+
+ones_like
+-------------------------------
+:doc_source: paddle.fluid.layers.ones_like
+
+
diff --git a/doc/fluid/api/paddle/pow.rst b/doc/fluid/api/paddle/pow.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f253c66b7b19e046c4215352607d8fa9d111f49a
--- /dev/null
+++ b/doc/fluid/api/paddle/pow.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_pow:
+
+pow
+-------------------------------
+:doc_source: paddle.fluid.layers.pow
+
+
diff --git a/doc/fluid/api/paddle/program_guard.rst b/doc/fluid/api/paddle/program_guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b599489cf7446329405d922182ddd44aa04ba8f
--- /dev/null
+++ b/doc/fluid/api/paddle/program_guard.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_program_guard:
+
+program_guard
+-------------------------------
+:doc_source: paddle.fluid.framework.program_guard
+
+
diff --git a/doc/fluid/api/paddle/py_func.rst b/doc/fluid/api/paddle/py_func.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f87420f5f0470bb90ff8a79a6316fad3d8af0426
--- /dev/null
+++ b/doc/fluid/api/paddle/py_func.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_py_func:
+
+py_func
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.py_func
+
+
diff --git a/doc/fluid/api/paddle/rand.rst b/doc/fluid/api/paddle/rand.rst
new file mode 100644
index 0000000000000000000000000000000000000000..03bb3be2d1733c6cee2b7d528c61d9f7471ac6a4
--- /dev/null
+++ b/doc/fluid/api/paddle/rand.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_rand:
+
+rand
+-------------------------------
+:doc_source: paddle.tensor.rand
+
+
diff --git a/doc/fluid/api/paddle/randint.rst b/doc/fluid/api/paddle/randint.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d94900efab5998d5a4d533c219af5166d770d261
--- /dev/null
+++ b/doc/fluid/api/paddle/randint.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_randint:
+
+randint
+-------------------------------
+:doc_source: paddle.tensor.randint
+
+
diff --git a/doc/fluid/api/paddle/randn.rst b/doc/fluid/api/paddle/randn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1709c9a40209b4e9286712de08d0b9dce33cf4f4
--- /dev/null
+++ b/doc/fluid/api/paddle/randn.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_randn:
+
+randn
+-------------------------------
+:doc_source: paddle.tensor.randn
+
+
diff --git a/doc/fluid/api/paddle/randperm.rst b/doc/fluid/api/paddle/randperm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c47a802ea2c8866fa77908c73071ae11b3676225
--- /dev/null
+++ b/doc/fluid/api/paddle/randperm.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_randperm:
+
+randperm
+-------------------------------
+:doc_source: paddle.tensor.randperm
+
+
diff --git a/doc/fluid/api/paddle/rank.rst b/doc/fluid/api/paddle/rank.rst
new file mode 100644
index 0000000000000000000000000000000000000000..572e95addf457116681fd3527b17dbcb17acc780
--- /dev/null
+++ b/doc/fluid/api/paddle/rank.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_rank:
+
+rank
+-------------------------------
+:doc_source: paddle.fluid.layers.rank
+
+
diff --git a/doc/fluid/api/paddle/reciprocal.rst b/doc/fluid/api/paddle/reciprocal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0617981662b5f4e62e5075119a08890c64cf9d0b
--- /dev/null
+++ b/doc/fluid/api/paddle/reciprocal.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reciprocal:
+
+reciprocal
+-------------------------------
+:doc_source: paddle.fluid.layers.reciprocal
+
+
diff --git a/doc/fluid/api/paddle/reduce_all.rst b/doc/fluid/api/paddle/reduce_all.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aab5eabfe1a4aa7d5a53fc8db53c82dded5b6ffe
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_all.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_all:
+
+reduce_all
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_all
+
+
diff --git a/doc/fluid/api/paddle/reduce_any.rst b/doc/fluid/api/paddle/reduce_any.rst
new file mode 100644
index 0000000000000000000000000000000000000000..daed4faaa6c08cf15ff1982431d5e38969d79d31
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_any.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_any:
+
+reduce_any
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_any
+
+
diff --git a/doc/fluid/api/paddle/reduce_max.rst b/doc/fluid/api/paddle/reduce_max.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d3a2691f6257ec331b1b2b38de13e87b775874b0
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_max.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_max:
+
+reduce_max
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_max
+
+
diff --git a/doc/fluid/api/paddle/reduce_mean.rst b/doc/fluid/api/paddle/reduce_mean.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b532f8a5c70b28ca08aa0285d0d658b6b7569ee8
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_mean.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_mean:
+
+reduce_mean
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_mean
+
+
diff --git a/doc/fluid/api/paddle/reduce_min.rst b/doc/fluid/api/paddle/reduce_min.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4c329ec02fb39caaf046c25c12c5d73557663265
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_min.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_min:
+
+reduce_min
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_min
+
+
diff --git a/doc/fluid/api/paddle/reduce_prod.rst b/doc/fluid/api/paddle/reduce_prod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b1bc9ccdb098d2e3396ca9ccd0db55de60fe53bc
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_prod.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_prod:
+
+reduce_prod
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_prod
+
+
diff --git a/doc/fluid/api/paddle/reduce_sum.rst b/doc/fluid/api/paddle/reduce_sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08237db0e0277f0dea6b218540f06c0e0cc92a27
--- /dev/null
+++ b/doc/fluid/api/paddle/reduce_sum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reduce_sum:
+
+reduce_sum
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_sum
+
+
diff --git a/doc/fluid/api/paddle/reshape.rst b/doc/fluid/api/paddle/reshape.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2f388c010fad9f5c4fe59931cc2306fdb608f710
--- /dev/null
+++ b/doc/fluid/api/paddle/reshape.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reshape:
+
+reshape
+-------------------------------
+:doc_source: paddle.fluid.layers.reshape
+
+
diff --git a/doc/fluid/api/paddle/reverse.rst b/doc/fluid/api/paddle/reverse.rst
new file mode 100644
index 0000000000000000000000000000000000000000..db756f4e14c1acbf1a6ae9d79f22653c7ebba42e
--- /dev/null
+++ b/doc/fluid/api/paddle/reverse.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_reverse:
+
+reverse
+-------------------------------
+:doc_source: paddle.fluid.layers.reverse
+
+
diff --git a/doc/fluid/api/paddle/roll.rst b/doc/fluid/api/paddle/roll.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9efeee975c1f92291ca85d71b18474636da0389b
--- /dev/null
+++ b/doc/fluid/api/paddle/roll.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_roll:
+
+roll
+-------------------------------
+:doc_source: paddle.tensor.roll
+
+
diff --git a/doc/fluid/api/paddle/round.rst b/doc/fluid/api/paddle/round.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1ad15979079f35885e880524731724eaab4fd553
--- /dev/null
+++ b/doc/fluid/api/paddle/round.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_round:
+
+round
+-------------------------------
+:doc_source: paddle.fluid.layers.round
+
+
diff --git a/doc/fluid/api/paddle/rsqrt.rst b/doc/fluid/api/paddle/rsqrt.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ae0ac79b879069991464b8e8d61a5f9d5e530c2b
--- /dev/null
+++ b/doc/fluid/api/paddle/rsqrt.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_rsqrt:
+
+rsqrt
+-------------------------------
+:doc_source: paddle.fluid.layers.rsqrt
+
+
diff --git a/doc/fluid/api/paddle/save.rst b/doc/fluid/api/paddle/save.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66bade1e4515407883c381136628da5d43382670
--- /dev/null
+++ b/doc/fluid/api/paddle/save.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.save
+
+
diff --git a/doc/fluid/api/paddle/scale.rst b/doc/fluid/api/paddle/scale.rst
new file mode 100644
index 0000000000000000000000000000000000000000..98c49a0d821c8cb5a6671b17857c198d6c6c0ac5
--- /dev/null
+++ b/doc/fluid/api/paddle/scale.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_scale:
+
+scale
+-------------------------------
+:doc_source: paddle.fluid.layers.scale
+
+
diff --git a/doc/fluid/api/paddle/scatter.rst b/doc/fluid/api/paddle/scatter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d43359b2471fb64c16c926aaf3964f84c2147fa0
--- /dev/null
+++ b/doc/fluid/api/paddle/scatter.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_scatter:
+
+scatter
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter
+
+
diff --git a/doc/fluid/api/paddle/scatter_nd.rst b/doc/fluid/api/paddle/scatter_nd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f54a82400eeee0b30c0c081deb29e041e3798524
--- /dev/null
+++ b/doc/fluid/api/paddle/scatter_nd.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_scatter_nd:
+
+scatter_nd
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter_nd
+
+
diff --git a/doc/fluid/api/paddle/scatter_nd_add.rst b/doc/fluid/api/paddle/scatter_nd_add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33ef5a3df9346f77d4f588c8c9b4a4e64be3660e
--- /dev/null
+++ b/doc/fluid/api/paddle/scatter_nd_add.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_scatter_nd_add:
+
+scatter_nd_add
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter_nd_add
+
+
diff --git a/doc/fluid/api/paddle/scope_guard.rst b/doc/fluid/api/paddle/scope_guard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e832b0198d066dfbaefb1c9f741658fdbdfcb59a
--- /dev/null
+++ b/doc/fluid/api/paddle/scope_guard.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_scope_guard:
+
+scope_guard
+-------------------------------
+:doc_source: paddle.fluid.executor.scope_guard
+
+
diff --git a/doc/fluid/api/paddle/shape.rst b/doc/fluid/api/paddle/shape.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e42db48db15c3274e6be9c837432e70e981ebd87
--- /dev/null
+++ b/doc/fluid/api/paddle/shape.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_shape:
+
+shape
+-------------------------------
+:doc_source: paddle.fluid.layers.shape
+
+
diff --git a/doc/fluid/api/paddle/shard_index.rst b/doc/fluid/api/paddle/shard_index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1bb05638b0879ba57b3bb634bf4f6cd210c15825
--- /dev/null
+++ b/doc/fluid/api/paddle/shard_index.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_shard_index:
+
+shard_index
+-------------------------------
+:doc_source: paddle.fluid.layers.shard_index
+
+
diff --git a/doc/fluid/api/paddle/shuffle.rst b/doc/fluid/api/paddle/shuffle.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f08502a88f7bc8fdd59aa6921ffcdca71dd1079d
--- /dev/null
+++ b/doc/fluid/api/paddle/shuffle.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_shuffle:
+
+shuffle
+-------------------------------
+:doc_source: paddle.fluid.io.shuffle
+
+
diff --git a/doc/fluid/api/paddle/sign.rst b/doc/fluid/api/paddle/sign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e59f0d96ce524c36f3ed969912b4f6694edf5f99
--- /dev/null
+++ b/doc/fluid/api/paddle/sign.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sign:
+
+sign
+-------------------------------
+:doc_source: paddle.fluid.layers.sign
+
+
diff --git a/doc/fluid/api/paddle/sin.rst b/doc/fluid/api/paddle/sin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2ec381ac7004a83c3ebb2bfc972c09c031facaa5
--- /dev/null
+++ b/doc/fluid/api/paddle/sin.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sin:
+
+sin
+-------------------------------
+:doc_source: paddle.fluid.layers.sin
+
+
diff --git a/doc/fluid/api/paddle/slice.rst b/doc/fluid/api/paddle/slice.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2e61c444471670a7800259820c719841511ccbce
--- /dev/null
+++ b/doc/fluid/api/paddle/slice.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_slice:
+
+slice
+-------------------------------
+:doc_source: paddle.fluid.layers.slice
+
+
diff --git a/doc/fluid/api/paddle/sort.rst b/doc/fluid/api/paddle/sort.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5f87357ccb39b52e975ef73c33b557f220c292a2
--- /dev/null
+++ b/doc/fluid/api/paddle/sort.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sort:
+
+sort
+-------------------------------
+:doc_source: paddle.tensor.sort
+
+
diff --git a/doc/fluid/api/paddle/split.rst b/doc/fluid/api/paddle/split.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2655cd709bd157a1d6f56d9414ed31baef7cb489
--- /dev/null
+++ b/doc/fluid/api/paddle/split.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_split:
+
+split
+-------------------------------
+:doc_source: paddle.fluid.layers.split
+
+
diff --git a/doc/fluid/api/paddle/sqrt.rst b/doc/fluid/api/paddle/sqrt.rst
new file mode 100644
index 0000000000000000000000000000000000000000..040365fc1e1c8079daf1b0b2783fe7cca21c70be
--- /dev/null
+++ b/doc/fluid/api/paddle/sqrt.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sqrt:
+
+sqrt
+-------------------------------
+:doc_source: paddle.fluid.layers.sqrt
+
+
diff --git a/doc/fluid/api/paddle/square.rst b/doc/fluid/api/paddle/square.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1be0cdd5f263974ad7cb677d1a906866f3103df9
--- /dev/null
+++ b/doc/fluid/api/paddle/square.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_square:
+
+square
+-------------------------------
+:doc_source: paddle.fluid.layers.square
+
+
diff --git a/doc/fluid/api/paddle/squeeze.rst b/doc/fluid/api/paddle/squeeze.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9ab4a189f16fea1ab483b161ded453a4894a7bff
--- /dev/null
+++ b/doc/fluid/api/paddle/squeeze.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_squeeze:
+
+squeeze
+-------------------------------
+:doc_source: paddle.fluid.layers.squeeze
+
+
diff --git a/doc/fluid/api/paddle/stack.rst b/doc/fluid/api/paddle/stack.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2bee2c49f255c3b34debfc55d4f22904c654db9b
--- /dev/null
+++ b/doc/fluid/api/paddle/stack.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_stack:
+
+stack
+-------------------------------
+:doc_source: paddle.fluid.layers.stack
+
+
diff --git a/doc/fluid/api/paddle/stanh.rst b/doc/fluid/api/paddle/stanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2244280fb1c611c44ad3dd94adfef492c587ace2
--- /dev/null
+++ b/doc/fluid/api/paddle/stanh.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_stanh:
+
+stanh
+-------------------------------
+:doc_source: paddle.fluid.layers.stanh
+
+
diff --git a/doc/fluid/api/paddle/std.rst b/doc/fluid/api/paddle/std.rst
new file mode 100644
index 0000000000000000000000000000000000000000..68766f7557678952879dd66aea5f85d5fdd4a170
--- /dev/null
+++ b/doc/fluid/api/paddle/std.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_std:
+
+std
+-------------------------------
+:doc_source: paddle.tensor.std
+
+
diff --git a/doc/fluid/api/paddle/strided_slice.rst b/doc/fluid/api/paddle/strided_slice.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1e445a5d4107fccbc01d064199eb688353dfad32
--- /dev/null
+++ b/doc/fluid/api/paddle/strided_slice.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_strided_slice:
+
+strided_slice
+-------------------------------
+:doc_source: paddle.fluid.layers.strided_slice
+
+
diff --git a/doc/fluid/api/paddle/sum.rst b/doc/fluid/api/paddle/sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..461dccf9197b86a3e576a9e37d05902bfd4c4fa1
--- /dev/null
+++ b/doc/fluid/api/paddle/sum.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sum:
+
+sum
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_sum
+
+
diff --git a/doc/fluid/api/paddle/sums.rst b/doc/fluid/api/paddle/sums.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c4c33f6efa59c0bb4e9725cfd5e0f74bbd37040
--- /dev/null
+++ b/doc/fluid/api/paddle/sums.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_sums:
+
+sums
+-------------------------------
+:doc_source: paddle.fluid.layers.sums
+
+
diff --git a/doc/fluid/api/paddle/t.rst b/doc/fluid/api/paddle/t.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4958e6c74f8db3d78289639a562fe6831ca262a2
--- /dev/null
+++ b/doc/fluid/api/paddle/t.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_t:
+
+t
+-------------------------------
+:doc_source: paddle.tensor.t
+
+
diff --git a/doc/fluid/api/paddle/tanh.rst b/doc/fluid/api/paddle/tanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..03184f5ef8809599f0f5a379a061a257febdfb0c
--- /dev/null
+++ b/doc/fluid/api/paddle/tanh.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_tanh:
+
+tanh
+-------------------------------
+:doc_source: paddle.fluid.layers.tanh
+
+
diff --git a/doc/fluid/api/paddle/topk.rst b/doc/fluid/api/paddle/topk.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4da4e8dc490d5028de2bd97cf8b88c9641d867df
--- /dev/null
+++ b/doc/fluid/api/paddle/topk.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_topk:
+
+topk
+-------------------------------
+:doc_source: paddle.fluid.layers.topk
+
+
diff --git a/doc/fluid/api/paddle/trace.rst b/doc/fluid/api/paddle/trace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dff070964b091ec9790409a5600f9047e084cda3
--- /dev/null
+++ b/doc/fluid/api/paddle/trace.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_trace:
+
+trace
+-------------------------------
+:doc_source: paddle.tensor.trace
+
+
diff --git a/doc/fluid/api/paddle/transpose.rst b/doc/fluid/api/paddle/transpose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1735e2fd9ba2b66ebf1e36569f3a8b5f12d77160
--- /dev/null
+++ b/doc/fluid/api/paddle/transpose.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_transpose:
+
+transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.transpose
+
+
diff --git a/doc/fluid/api/paddle/tril.rst b/doc/fluid/api/paddle/tril.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cc966a8c2a0810385e4d4d6c542a0e98bd9e0333
--- /dev/null
+++ b/doc/fluid/api/paddle/tril.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_tril:
+
+tril
+-------------------------------
+:doc_source: paddle.tensor.tril
+
+
diff --git a/doc/fluid/api/paddle/triu.rst b/doc/fluid/api/paddle/triu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4f3d9cfc38e223a75a004cc8ae1c99dc62550362
--- /dev/null
+++ b/doc/fluid/api/paddle/triu.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_triu:
+
+triu
+-------------------------------
+:doc_source: paddle.tensor.triu
+
+
diff --git a/doc/fluid/api/paddle/unbind.rst b/doc/fluid/api/paddle/unbind.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d394d3140260eb3ea58631fb90ad7849cce872db
--- /dev/null
+++ b/doc/fluid/api/paddle/unbind.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_unbind:
+
+unbind
+-------------------------------
+:doc_source: paddle.tensor.unbind
+
+
diff --git a/doc/fluid/api/paddle/unique.rst b/doc/fluid/api/paddle/unique.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2b0659ce29eeb86bd35597f64c7d69cd2f2172ed
--- /dev/null
+++ b/doc/fluid/api/paddle/unique.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_unique:
+
+unique
+-------------------------------
+:doc_source: paddle.fluid.layers.unique
+
+
diff --git a/doc/fluid/api/paddle/unique_with_counts.rst b/doc/fluid/api/paddle/unique_with_counts.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3157d27177969afdc5ddf75f4290c75de12a87fd
--- /dev/null
+++ b/doc/fluid/api/paddle/unique_with_counts.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_unique_with_counts:
+
+unique_with_counts
+-------------------------------
+:doc_source: paddle.fluid.layers.unique_with_counts
+
+
diff --git a/doc/fluid/api/paddle/unsqueeze.rst b/doc/fluid/api/paddle/unsqueeze.rst
new file mode 100644
index 0000000000000000000000000000000000000000..caccc1d7f13865c6aa9003f17e483029d975a4eb
--- /dev/null
+++ b/doc/fluid/api/paddle/unsqueeze.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_unsqueeze:
+
+unsqueeze
+-------------------------------
+:doc_source: paddle.fluid.layers.unsqueeze
+
+
diff --git a/doc/fluid/api/paddle/unstack.rst b/doc/fluid/api/paddle/unstack.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c21ac4dff3d80327d36bfe97755dfe7850386404
--- /dev/null
+++ b/doc/fluid/api/paddle/unstack.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_unstack:
+
+unstack
+-------------------------------
+:doc_source: paddle.fluid.layers.unstack
+
+
diff --git a/doc/fluid/api/paddle/var.rst b/doc/fluid/api/paddle/var.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3ec924c0e4e077b53842b9ca41a550af7c030666
--- /dev/null
+++ b/doc/fluid/api/paddle/var.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_var:
+
+var
+-------------------------------
+:doc_source: paddle.tensor.var
+
+
diff --git a/doc/fluid/api/paddle/where.rst b/doc/fluid/api/paddle/where.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f7080671dc15152fc37fc4b207c3184f89bd25fe
--- /dev/null
+++ b/doc/fluid/api/paddle/where.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_where:
+
+where
+-------------------------------
+:doc_source: paddle.fluid.layers.cond
+
+
diff --git a/doc/fluid/api/paddle/zeros.rst b/doc/fluid/api/paddle/zeros.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9319d948b7a20b235ba54e0ff7c8203db65c9fcb
--- /dev/null
+++ b/doc/fluid/api/paddle/zeros.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_zeros:
+
+zeros
+-------------------------------
+:doc_source: paddle.fluid.layers.zeros
+
+
diff --git a/doc/fluid/api/paddle/zeros_like.rst b/doc/fluid/api/paddle/zeros_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75f97e82ffde14d73e5f2fa40a624930b34d4a57
--- /dev/null
+++ b/doc/fluid/api/paddle/zeros_like.rst
@@ -0,0 +1,7 @@
+.. _api_paddle_zeros_like:
+
+zeros_like
+-------------------------------
+:doc_source: paddle.fluid.layers.zeros_like
+
+
diff --git a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst b/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst
deleted file mode 100644
index af467c260db3ef0942076b9e001e806d9adc9cbb..0000000000000000000000000000000000000000
--- a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_file.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_recordio_writer_convert_reader_to_recordio_file:
-
-convert_reader_to_recordio_file
--------------------------------
-
-.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_file
- :noindex:
-
diff --git a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst b/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst
deleted file mode 100644
index a3a58d24fa7e329fa69581a979701f836ada65eb..0000000000000000000000000000000000000000
--- a/doc/fluid/api/recordio_writer/convert_reader_to_recordio_files.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_recordio_writer_convert_reader_to_recordio_files:
-
-convert_reader_to_recordio_files
---------------------------------
-
-.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_files
- :noindex:
-
diff --git a/doc/fluid/api/review_tmp.rst b/doc/fluid/api/review_tmp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e39366bcef08a15baa15c3cfbb318022a2dc47b2
--- /dev/null
+++ b/doc/fluid/api/review_tmp.rst
@@ -0,0 +1,9 @@
+=================
+paddle.review_tmp
+=================
+
+.. toctree::
+ :maxdepth: 1
+
+ review_tmp/MarginRankingLoss.rst
+ review_tmp/margin_ranking_loss.rst
diff --git a/doc/fluid/api/review_tmp/MarginRankingLoss.rst b/doc/fluid/api/review_tmp/MarginRankingLoss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..edc5d1cc57c85be5eb37312c6dc9b8b204b4d9b1
--- /dev/null
+++ b/doc/fluid/api/review_tmp/MarginRankingLoss.rst
@@ -0,0 +1,9 @@
+.. _api_nn_loss_MarginRankingLoss_tmp:
+
+MarginRankingLoss
+-----------------
+
+.. autoclass:: paddle.nn.loss.MarginRankingLoss
+ :members:
+ :inherited-members:
+ :noindex:
diff --git a/doc/fluid/api/review_tmp/margin_ranking_loss.rst b/doc/fluid/api/review_tmp/margin_ranking_loss.rst
new file mode 100644
index 0000000000000000000000000000000000000000..289d1928bf05925dc81238c7ff0dad2623a4d3fc
--- /dev/null
+++ b/doc/fluid/api/review_tmp/margin_ranking_loss.rst
@@ -0,0 +1,7 @@
+.. _api_nn_functional_margin_ranking_loss_tmp:
+
+margin_ranking_loss
+-------------------
+
+.. autofunction:: paddle.nn.functional.margin_ranking_loss
+ :noindex:
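A minimal usage sketch for the two margin ranking loss entries above, assuming the 2.x ``paddle.nn`` API (tensor values and the ``margin`` setting are illustrative only):

.. code-block:: python

    import paddle

    # label is +1 where `input` should rank higher than `other`, -1 otherwise
    input = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    other = paddle.to_tensor([[2.0, 1.0], [2.0, 4.0]])
    label = paddle.to_tensor([[1.0, -1.0], [-1.0, 1.0]])

    # functional form
    loss = paddle.nn.functional.margin_ranking_loss(input, other, label, margin=0.1)

    # class form, as documented by MarginRankingLoss.rst
    loss_fn = paddle.nn.loss.MarginRankingLoss(margin=0.1)
    loss = loss_fn(input, other, label)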
diff --git a/doc/fluid/api/static.rst b/doc/fluid/api/static.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fe60795ac04c3a01002be6da5c4d1a94d0fcbf9d
--- /dev/null
+++ b/doc/fluid/api/static.rst
@@ -0,0 +1,12 @@
+=======================
+paddle.static
+=======================
+
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ static/data.rst
+ static/InputSpec.rst
\ No newline at end of file
diff --git a/doc/fluid/api/static/InputSpec.rst b/doc/fluid/api/static/InputSpec.rst
new file mode 100644
index 0000000000000000000000000000000000000000..89da356c9dd82fdf7e32a0f25e1ccc6601138c80
--- /dev/null
+++ b/doc/fluid/api/static/InputSpec.rst
@@ -0,0 +1,8 @@
+.. _api_static_InputSpec:
+
+InputSpec
+------------
+
+.. autoclass:: paddle.static.InputSpec
+ :members:
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/static/data.rst b/doc/fluid/api/static/data.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5555d9314555acfb69fadcc6ee5edff47fa2dea8
--- /dev/null
+++ b/doc/fluid/api/static/data.rst
@@ -0,0 +1,7 @@
+.. _api_static_data:
+
+data
+------------
+
+.. autofunction:: paddle.static.data
+ :noindex:
\ No newline at end of file
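A minimal sketch of how the two ``paddle.static`` entries above are typically used together, assuming the 2.x static-graph API (the names, shape, and dtype below are illustrative):

.. code-block:: python

    import paddle

    paddle.enable_static()  # paddle.static APIs operate on static graphs

    # paddle.static.data creates a placeholder variable in the default main program
    x = paddle.static.data(name='x', shape=[None, 784], dtype='float32')

    # paddle.static.InputSpec describes the same input signature without
    # creating a variable, e.g. for paddle.jit.to_static / paddle.jit.save
    spec = paddle.static.InputSpec(shape=[None, 784], dtype='float32', name='x')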
diff --git a/doc/fluid/api/tensor.rst b/doc/fluid/api/tensor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b79132329944bd42e8573bf65b2fbc58d2a155f9
--- /dev/null
+++ b/doc/fluid/api/tensor.rst
@@ -0,0 +1,133 @@
+=============
+paddle.tensor
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ tensor/abs.rst
+ tensor/acos.rst
+ tensor/add.rst
+ tensor/arange.rst
+ tensor/argmax.rst
+ tensor/argmin.rst
+ tensor/argsort.rst
+ tensor/asin.rst
+ tensor/atan.rst
+ tensor/cast.rst
+ tensor/ceil.rst
+ tensor/chunk.rst
+ tensor/concat.rst
+ tensor/cos.rst
+ tensor/create_tensor.rst
+ tensor/crop_tensor.rst
+ tensor/cross.rst
+ tensor/cumsum.rst
+ tensor/diag.rst
+ tensor/div.rst
+ tensor/elementwise_add.rst
+ tensor/elementwise_div.rst
+ tensor/elementwise_floordiv.rst
+ tensor/elementwise_mod.rst
+ tensor/elementwise_mul.rst
+ tensor/elementwise_pow.rst
+ tensor/elementwise_sub.rst
+ tensor/equal_all.rst
+ tensor/erf.rst
+ tensor/exp.rst
+ tensor/expand.rst
+ tensor/expand_as.rst
+ tensor/eye.rst
+ tensor/fill_constant.rst
+ tensor/flatten.rst
+ tensor/floor.rst
+ tensor/full.rst
+ tensor/full_like.rst
+ tensor/gather.rst
+ tensor/gather_nd.rst
+ tensor/greater_equal.rst
+ tensor/greater_than.rst
+ tensor/has_inf.rst
+ tensor/has_nan.rst
+ tensor/increment.rst
+ tensor/is_empty.rst
+ tensor/index_select.rst
+ tensor/isfinite.rst
+ tensor/isinf.rst
+ tensor/isnan.rst
+ tensor/less_equal.rst
+ tensor/less_than.rst
+ tensor/logic.rst
+ tensor/linalg.rst
+ tensor/linspace.rst
+ tensor/load.rst
+ tensor/log.rst
+ tensor/logical_and.rst
+ tensor/logical_not.rst
+ tensor/logical_or.rst
+ tensor/logical_xor.rst
+ tensor/math.rst
+ tensor/masked_select.rst
+ tensor/max.rst
+ tensor/maximum.rst
+ tensor/mean.rst
+ tensor/min.rst
+ tensor/minimum.rst
+ tensor/mm.rst
+ tensor/mul.rst
+ tensor/multiplex.rst
+ tensor/norm.rst
+ tensor/not_equal.rst
+ tensor/ones.rst
+ tensor/ones_like.rst
+ tensor/numel.rst
+ tensor/pow.rst
+ tensor/random.rst
+ tensor/rank.rst
+ tensor/reciprocal.rst
+ tensor/reduce_all.rst
+ tensor/reduce_any.rst
+ tensor/reduce_max.rst
+ tensor/reduce_mean.rst
+ tensor/reduce_min.rst
+ tensor/reduce_prod.rst
+ tensor/reduce_sum.rst
+ tensor/reshape.rst
+ tensor/reverse.rst
+ tensor/round.rst
+ tensor/rsqrt.rst
+ tensor/save.rst
+ tensor/scale.rst
+ tensor/scatter.rst
+ tensor/scatter_nd.rst
+ tensor/scatter_nd_add.rst
+ tensor/search.rst
+ tensor/shape.rst
+ tensor/shard_index.rst
+ tensor/shuffle.rst
+ tensor/sign.rst
+ tensor/sin.rst
+ tensor/slice.rst
+ tensor/sort.rst
+ tensor/split.rst
+ tensor/sqrt.rst
+ tensor/square.rst
+ tensor/squeeze.rst
+ tensor/stack.rst
+ tensor/stanh.rst
+ tensor/std.rst
+ tensor/stat.rst
+ tensor/strided_slice.rst
+ tensor/sum.rst
+ tensor/sums.rst
+ tensor/tanh.rst
+ tensor/topk.rst
+ tensor/transpose.rst
+ tensor/unique.rst
+ tensor/unique_with_counts.rst
+ tensor/unsqueeze.rst
+ tensor/unstack.rst
+ tensor/var.rst
+ tensor/where.rst
+ tensor/zeros.rst
+ tensor/zeros_like.rst
diff --git a/doc/fluid/api/tensor/abs.rst b/doc/fluid/api/tensor/abs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..61b357dc809c72ccf710f7b3e467ce0b6a1b49df
--- /dev/null
+++ b/doc/fluid/api/tensor/abs.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_abs:
+
+abs
+-------------------------------
+:doc_source: paddle.fluid.layers.abs
+
+
diff --git a/doc/fluid/api/tensor/acos.rst b/doc/fluid/api/tensor/acos.rst
new file mode 100644
index 0000000000000000000000000000000000000000..58ba3bbfb91ef4d02d87a7d7898e85b855818441
--- /dev/null
+++ b/doc/fluid/api/tensor/acos.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_acos:
+
+acos
+-------------------------------
+:doc_source: paddle.fluid.layers.acos
+
+
diff --git a/doc/fluid/api/tensor/add.rst b/doc/fluid/api/tensor/add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..223268b1ef43675ee87e8ba6bf5b75b46e5008aa
--- /dev/null
+++ b/doc/fluid/api/tensor/add.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_add:
+
+add
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_add
+
+
diff --git a/doc/fluid/api/tensor/arange.rst b/doc/fluid/api/tensor/arange.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d15d63965f41a9de6debf789b4792a13634e4c2
--- /dev/null
+++ b/doc/fluid/api/tensor/arange.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_arange:
+
+arange
+-------------------------------
+:doc_source: paddle.fluid.layers.range
+
+
diff --git a/doc/fluid/api/tensor/argmax.rst b/doc/fluid/api/tensor/argmax.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c4e15720bdeb2540f4d01026a5d68985cd32f05
--- /dev/null
+++ b/doc/fluid/api/tensor/argmax.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_argmax:
+
+argmax
+------
+
+.. autofunction:: paddle.tensor.search.argmax
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/argmin.rst b/doc/fluid/api/tensor/argmin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c23f6127472a2a33acddfb35b7f8a21be9353f42
--- /dev/null
+++ b/doc/fluid/api/tensor/argmin.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_argmin:
+
+argmin
+------
+
+.. autofunction:: paddle.tensor.search.argmin
+ :noindex:
+
+
diff --git a/doc/fluid/api/tensor/argsort.rst b/doc/fluid/api/tensor/argsort.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2168777783e8ff4a2ba5e217ce3f9982f4f97d8f
--- /dev/null
+++ b/doc/fluid/api/tensor/argsort.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_argsort:
+
+argsort
+-------------------------------
+:doc_source: paddle.tensor.argsort
+
+
diff --git a/doc/fluid/api/tensor/asin.rst b/doc/fluid/api/tensor/asin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8fca23d5ab12687ad8f2a5339d46c258aaab6ee6
--- /dev/null
+++ b/doc/fluid/api/tensor/asin.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_asin:
+
+asin
+-------------------------------
+:doc_source: paddle.fluid.layers.asin
+
+
diff --git a/doc/fluid/api/tensor/atan.rst b/doc/fluid/api/tensor/atan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d26a40d1244ef1dec0e271148727c729d9d9f6cd
--- /dev/null
+++ b/doc/fluid/api/tensor/atan.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_atan:
+
+atan
+-------------------------------
+:doc_source: paddle.fluid.layers.atan
+
+
diff --git a/doc/fluid/api/tensor/cast.rst b/doc/fluid/api/tensor/cast.rst
new file mode 100644
index 0000000000000000000000000000000000000000..15182703078a8fa28736f6d6e0636775dee09e78
--- /dev/null
+++ b/doc/fluid/api/tensor/cast.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_cast:
+
+cast
+-------------------------------
+:doc_source: paddle.fluid.layers.cast
+
+
diff --git a/doc/fluid/api/tensor/ceil.rst b/doc/fluid/api/tensor/ceil.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f570078751ca28f9d07b046261f4e010342b4e66
--- /dev/null
+++ b/doc/fluid/api/tensor/ceil.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_ceil:
+
+ceil
+-------------------------------
+:doc_source: paddle.fluid.layers.ceil
+
+
diff --git a/doc/fluid/api/tensor/chunk.rst b/doc/fluid/api/tensor/chunk.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5638cd48cf98b2e2433ab9e500f7f5dc3aa8e255
--- /dev/null
+++ b/doc/fluid/api/tensor/chunk.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_chunk:
+
+chunk
+-------
+
+.. autofunction:: paddle.tensor.manipulation.chunk
+ :noindex:
+
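A short sketch for the ``chunk`` entry above, assuming the public alias ``paddle.chunk`` for ``paddle.tensor.manipulation.chunk``:

.. code-block:: python

    import paddle

    x = paddle.arange(12, dtype='float32').reshape([3, 4])
    # split into 2 equally sized pieces along axis 1
    parts = paddle.chunk(x, chunks=2, axis=1)
    # parts[0].shape == [3, 2] and parts[1].shape == [3, 2]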
diff --git a/doc/fluid/api/tensor/concat.rst b/doc/fluid/api/tensor/concat.rst
new file mode 100644
index 0000000000000000000000000000000000000000..88bc288edaa3b74aab58780dcda9f942ff840c7b
--- /dev/null
+++ b/doc/fluid/api/tensor/concat.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_concat:
+
+concat
+--------
+
+.. autofunction:: paddle.tensor.manipulation.concat
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/cos.rst b/doc/fluid/api/tensor/cos.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8eb9afad82ed6a492c5032ada95dccb19ec71c69
--- /dev/null
+++ b/doc/fluid/api/tensor/cos.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_cos:
+
+cos
+-------------------------------
+:doc_source: paddle.fluid.layers.cos
+
+
diff --git a/doc/fluid/api/tensor/create_tensor.rst b/doc/fluid/api/tensor/create_tensor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..344f6fefcad4639b21e6bbab03201eb057ce37bb
--- /dev/null
+++ b/doc/fluid/api/tensor/create_tensor.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_create_tensor:
+
+create_tensor
+-------------------------------
+:doc_source: paddle.fluid.layers.create_tensor
+
+
diff --git a/doc/fluid/api/tensor/crop_tensor.rst b/doc/fluid/api/tensor/crop_tensor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c6b72a4cb69c9bc34b30c53f87562c2e941f8e5
--- /dev/null
+++ b/doc/fluid/api/tensor/crop_tensor.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_crop_tensor:
+
+crop_tensor
+-------------------------------
+:doc_source: paddle.fluid.layers.crop_tensor
+
+
diff --git a/doc/fluid/api/tensor/cross.rst b/doc/fluid/api/tensor/cross.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3bb049f74d7232bd42020fee1b702c313395ba85
--- /dev/null
+++ b/doc/fluid/api/tensor/cross.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_cross:
+
+cross
+-------------------------------
+:doc_source: paddle.tensor.cross
+
+
diff --git a/doc/fluid/api/tensor/cumsum.rst b/doc/fluid/api/tensor/cumsum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..96c1bf0abf8c06621b93624941025e4929652add
--- /dev/null
+++ b/doc/fluid/api/tensor/cumsum.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_cumsum:
+
+cumsum
+-------------------------------
+:doc_source: paddle.tensor.cumsum
+
+
diff --git a/doc/fluid/api/tensor/diag.rst b/doc/fluid/api/tensor/diag.rst
new file mode 100644
index 0000000000000000000000000000000000000000..36b4b3b01443b33a26d363da39b9d781b37ea1fb
--- /dev/null
+++ b/doc/fluid/api/tensor/diag.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_diag:
+
+diag
+-------------------------------
+:doc_source: paddle.fluid.layers.diag
+
+
diff --git a/doc/fluid/api/tensor/div.rst b/doc/fluid/api/tensor/div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f3c0e2a1d13e4a5ff29bd9df6b086cee3dd2b511
--- /dev/null
+++ b/doc/fluid/api/tensor/div.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_div:
+
+div
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_div
+
+
diff --git a/doc/fluid/api/tensor/elementwise_add.rst b/doc/fluid/api/tensor/elementwise_add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c517357f9e8eb560f4750163ee4d6dd278e095f
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_add.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_add:
+
+elementwise_add
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_add
+
+
diff --git a/doc/fluid/api/tensor/elementwise_div.rst b/doc/fluid/api/tensor/elementwise_div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9095a5380b1f9be1bb9ab6372fecc6ee27084b41
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_div.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_div:
+
+elementwise_div
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_div
+
+
diff --git a/doc/fluid/api/tensor/elementwise_floordiv.rst b/doc/fluid/api/tensor/elementwise_floordiv.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9a0b8b433dcd8c43d8d439a969195d62f5f73158
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_floordiv.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_floordiv:
+
+elementwise_floordiv
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_floordiv
+
+
diff --git a/doc/fluid/api/tensor/elementwise_mod.rst b/doc/fluid/api/tensor/elementwise_mod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..05866ce901ae0c9493732f5fb7813b090534c841
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_mod.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_mod:
+
+elementwise_mod
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_mod
+
+
diff --git a/doc/fluid/api/tensor/elementwise_mul.rst b/doc/fluid/api/tensor/elementwise_mul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb5c52e3b212241b2e5bb84f487dcc6bd66a28d6
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_mul.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_mul:
+
+elementwise_mul
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_mul
+
+
diff --git a/doc/fluid/api/tensor/elementwise_pow.rst b/doc/fluid/api/tensor/elementwise_pow.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b8c22385f1d6ae69a8d4c984aec79f3023e9b46f
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_pow.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_pow:
+
+elementwise_pow
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_pow
+
+
diff --git a/doc/fluid/api/tensor/elementwise_sub.rst b/doc/fluid/api/tensor/elementwise_sub.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4ff7a2e9e347f96bb7008517adb55e5c20cd9cbd
--- /dev/null
+++ b/doc/fluid/api/tensor/elementwise_sub.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_elementwise_sub:
+
+elementwise_sub
+-------------------------------
+:doc_source: paddle.fluid.layers.elementwise_sub
+
+
diff --git a/doc/fluid/api/tensor/equal_all.rst b/doc/fluid/api/tensor/equal_all.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5149e6101d64b1e2c8626a1d35693fd503b2d230
--- /dev/null
+++ b/doc/fluid/api/tensor/equal_all.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_equal_all:
+
+equal_all
+-------------------------------
+:doc_source: paddle.tensor.equal_all
+
+
diff --git a/doc/fluid/api/tensor/erf.rst b/doc/fluid/api/tensor/erf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..af13a5d7b6fa9628be88c0f8c3e812d32a94b1bd
--- /dev/null
+++ b/doc/fluid/api/tensor/erf.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_erf:
+
+erf
+-------------------------------
+:doc_source: paddle.fluid.layers.erf
+
+
diff --git a/doc/fluid/api/tensor/exp.rst b/doc/fluid/api/tensor/exp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f3b948c1a4270b720e4275972cd10041aac8b76
--- /dev/null
+++ b/doc/fluid/api/tensor/exp.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_exp:
+
+exp
+-------------------------------
+:doc_source: paddle.fluid.layers.exp
+
+
diff --git a/doc/fluid/api/tensor/expand.rst b/doc/fluid/api/tensor/expand.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67dbb96e6ef71b9523fe7392abbe4a984aede6d7
--- /dev/null
+++ b/doc/fluid/api/tensor/expand.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_expand:
+
+expand
+-------------------------------
+:doc_source: paddle.fluid.layers.expand
+
+
diff --git a/doc/fluid/api/tensor/expand_as.rst b/doc/fluid/api/tensor/expand_as.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97e2e74e9599a5091a4cbc54e1a27ae95ff4d855
--- /dev/null
+++ b/doc/fluid/api/tensor/expand_as.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_expand_as:
+
+expand_as
+-------------------------------
+:doc_source: paddle.fluid.layers.expand_as
+
+
diff --git a/doc/fluid/api/tensor/eye.rst b/doc/fluid/api/tensor/eye.rst
new file mode 100644
index 0000000000000000000000000000000000000000..be5125af675a52d6a6525e33e40d8f58bd965759
--- /dev/null
+++ b/doc/fluid/api/tensor/eye.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_eye:
+
+eye
+--------
+
+.. autofunction:: paddle.tensor.creation.eye
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/fill_constant.rst b/doc/fluid/api/tensor/fill_constant.rst
new file mode 100644
index 0000000000000000000000000000000000000000..db2f80ec780942b244549a673511a053c4a7f6c9
--- /dev/null
+++ b/doc/fluid/api/tensor/fill_constant.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_fill_constant:
+
+fill_constant
+-------------------------------
+:doc_source: paddle.fluid.layers.fill_constant
+
+
diff --git a/doc/fluid/api/tensor/flatten.rst b/doc/fluid/api/tensor/flatten.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c239c754791688d24d25ffdaf24aa50bc1ff617b
--- /dev/null
+++ b/doc/fluid/api/tensor/flatten.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_flatten:
+
+flatten
+-------------------------------
+:doc_source: paddle.fluid.layers.flatten
+
+
diff --git a/doc/fluid/api/tensor/floor.rst b/doc/fluid/api/tensor/floor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..869fe176116f3552481b43e0a742240c099f1f21
--- /dev/null
+++ b/doc/fluid/api/tensor/floor.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_floor:
+
+floor
+-------------------------------
+:doc_source: paddle.fluid.layers.floor
+
+
diff --git a/doc/fluid/api/tensor/full.rst b/doc/fluid/api/tensor/full.rst
new file mode 100644
index 0000000000000000000000000000000000000000..842cb4b074c8a1f3dffc674ed9fe86359d1f85e0
--- /dev/null
+++ b/doc/fluid/api/tensor/full.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_full:
+
+full
+--------
+
+.. autofunction:: paddle.tensor.creation.full
+ :noindex:
diff --git a/doc/fluid/api/tensor/full_like.rst b/doc/fluid/api/tensor/full_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f5f876d570aab434c874c35663ee1f5ddb2b4b7e
--- /dev/null
+++ b/doc/fluid/api/tensor/full_like.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_full_like:
+
+full_like
+------------
+
+.. autofunction:: paddle.tensor.creation.full_like
+ :noindex:
diff --git a/doc/fluid/api/tensor/gather.rst b/doc/fluid/api/tensor/gather.rst
new file mode 100644
index 0000000000000000000000000000000000000000..68137b46c479455e0d8e19cc9411add758e558ec
--- /dev/null
+++ b/doc/fluid/api/tensor/gather.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_gather:
+
+gather
+--------
+
+.. autofunction:: paddle.tensor.manipulation.gather
+ :noindex:
+
+
diff --git a/doc/fluid/api/tensor/gather_nd.rst b/doc/fluid/api/tensor/gather_nd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..93c95cb491e4a55c3d2d31c90036266ef980e434
--- /dev/null
+++ b/doc/fluid/api/tensor/gather_nd.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_gather_nd:
+
+gather_nd
+----------
+
+.. autofunction:: paddle.tensor.manipulation.gather_nd
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/greater_equal.rst b/doc/fluid/api/tensor/greater_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1a1394de05e7b4bf7b4cbfb463e3c9e79206d9cc
--- /dev/null
+++ b/doc/fluid/api/tensor/greater_equal.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_greater_equal:
+
+greater_equal
+-------------------------------
+:doc_source: paddle.tensor.greater_equal
+
+
diff --git a/doc/fluid/api/tensor/greater_than.rst b/doc/fluid/api/tensor/greater_than.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b0ff74910eb094120568dc4f3c7f792e221c91b7
--- /dev/null
+++ b/doc/fluid/api/tensor/greater_than.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_greater_than:
+
+greater_than
+-------------------------------
+:doc_source: paddle.tensor.greater_than
+
+
diff --git a/doc/fluid/api/tensor/has_inf.rst b/doc/fluid/api/tensor/has_inf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0a289aeaabe48bbc4ec4ae15334c36a5df65a758
--- /dev/null
+++ b/doc/fluid/api/tensor/has_inf.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_has_inf:
+
+has_inf
+-------------------------------
+:doc_source: paddle.fluid.layers.has_inf
+
+
diff --git a/doc/fluid/api/tensor/has_nan.rst b/doc/fluid/api/tensor/has_nan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..411f1170345749ee2ca59b45d8a0a2f2d3ccb6c5
--- /dev/null
+++ b/doc/fluid/api/tensor/has_nan.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_has_nan:
+
+has_nan
+-------------------------------
+:doc_source: paddle.fluid.layers.has_nan
+
+
diff --git a/doc/fluid/api/tensor/increment.rst b/doc/fluid/api/tensor/increment.rst
new file mode 100644
index 0000000000000000000000000000000000000000..044ed8ace8be80a869b4cf3f1ff403c2eb419bd5
--- /dev/null
+++ b/doc/fluid/api/tensor/increment.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_increment:
+
+increment
+-------------------------------
+:doc_source: paddle.fluid.layers.increment
+
+
diff --git a/doc/fluid/api/tensor/index_select.rst b/doc/fluid/api/tensor/index_select.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb97773baefe511c47602988622baf96d6ae110b
--- /dev/null
+++ b/doc/fluid/api/tensor/index_select.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_search_index_select:
+
+index_select
+-------------
+
+.. autofunction:: paddle.tensor.search.index_select
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/is_empty.rst b/doc/fluid/api/tensor/is_empty.rst
new file mode 100644
index 0000000000000000000000000000000000000000..822c45ef1afd5f4b40b24ce2286bc972253a1452
--- /dev/null
+++ b/doc/fluid/api/tensor/is_empty.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_is_empty:
+
+is_empty
+-------------------------------
+:doc_source: paddle.fluid.layers.is_empty
+
+
diff --git a/doc/fluid/api/tensor/isfinite.rst b/doc/fluid/api/tensor/isfinite.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8a599a8ab6eb2217070a5bc5eccea4f378b1ee04
--- /dev/null
+++ b/doc/fluid/api/tensor/isfinite.rst
@@ -0,0 +1,8 @@
+.. _api_tensor_isfinite:
+
+isfinite
+-------------------------------
+
+.. autofunction:: paddle.tensor.math.isfinite
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/isinf.rst b/doc/fluid/api/tensor/isinf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..df86445ecfb15c4fda06f442b7b4c4f7cd0d5c0f
--- /dev/null
+++ b/doc/fluid/api/tensor/isinf.rst
@@ -0,0 +1,8 @@
+.. _api_tensor_isinf:
+
+isinf
+-------------------------------
+
+.. autofunction:: paddle.tensor.math.isinf
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/isnan.rst b/doc/fluid/api/tensor/isnan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0fa742c2d2c26d6040a5f303e925c06114d8b93a
--- /dev/null
+++ b/doc/fluid/api/tensor/isnan.rst
@@ -0,0 +1,8 @@
+.. _api_tensor_isnan:
+
+isnan
+-------------------------------
+
+.. autofunction:: paddle.tensor.math.isnan
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/less_equal.rst b/doc/fluid/api/tensor/less_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4adbeb1ccf2972ccb30cb1fb762dbea7a74114a4
--- /dev/null
+++ b/doc/fluid/api/tensor/less_equal.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_less_equal:
+
+less_equal
+-------------------------------
+:doc_source: paddle.tensor.less_equal
+
+
diff --git a/doc/fluid/api/tensor/less_than.rst b/doc/fluid/api/tensor/less_than.rst
new file mode 100644
index 0000000000000000000000000000000000000000..592dc48d66bbdd4c6506e118c98b654bd55e93fe
--- /dev/null
+++ b/doc/fluid/api/tensor/less_than.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_less_than:
+
+less_than
+-------------------------------
+:doc_source: paddle.tensor.less_than
+
+
diff --git a/doc/fluid/api/tensor/linalg.rst b/doc/fluid/api/tensor/linalg.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08cabf1c4bfb9ee5be5b8d843ce0936707c8a50c
--- /dev/null
+++ b/doc/fluid/api/tensor/linalg.rst
@@ -0,0 +1,8 @@
+======
+linalg
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ linalg/dist.rst
diff --git a/doc/fluid/api/tensor/linalg/dist.rst b/doc/fluid/api/tensor/linalg/dist.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6ca92b5366fcfc3cae450ab8790b0773d276a57c
--- /dev/null
+++ b/doc/fluid/api/tensor/linalg/dist.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_linalg_dist:
+
+dist
+--------
+
+.. autofunction:: paddle.tensor.linalg.dist
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/linspace.rst b/doc/fluid/api/tensor/linspace.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b8fecff2b9be66aa8bfbb6eb4ebed5f3bf564f8f
--- /dev/null
+++ b/doc/fluid/api/tensor/linspace.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_linspace:
+
+linspace
+--------
+
+.. autofunction:: paddle.tensor.creation.linspace
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/load.rst b/doc/fluid/api/tensor/load.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eba05933ca3bd83e6dd97d2625b74090b19d513c
--- /dev/null
+++ b/doc/fluid/api/tensor/load.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.io.load
+
+
diff --git a/doc/fluid/api/tensor/log.rst b/doc/fluid/api/tensor/log.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2e748ae1f1aa7979a6b027bf1aa0396171ba6183
--- /dev/null
+++ b/doc/fluid/api/tensor/log.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_log:
+
+log
+-------------------------------
+:doc_source: paddle.fluid.layers.log
+
+
diff --git a/doc/fluid/api/tensor/logic.rst b/doc/fluid/api/tensor/logic.rst
new file mode 100644
index 0000000000000000000000000000000000000000..389c83b100894432c202533508bd2fa173c53246
--- /dev/null
+++ b/doc/fluid/api/tensor/logic.rst
@@ -0,0 +1,8 @@
+======
+logic
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ logic/allclose.rst
\ No newline at end of file
diff --git a/doc/fluid/api/tensor/logic/allclose.rst b/doc/fluid/api/tensor/logic/allclose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..72a8c73d61df39271a187aa9fa3e56eb90006844
--- /dev/null
+++ b/doc/fluid/api/tensor/logic/allclose.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_logic_allclose:
+
+allclose
+--------
+
+.. autofunction:: paddle.tensor.logic.allclose
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/tensor/logical_and.rst b/doc/fluid/api/tensor/logical_and.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e9f4b93cfa9175ab2fc4fffb5b1336ca13db7319
--- /dev/null
+++ b/doc/fluid/api/tensor/logical_and.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_logical_and:
+
+logical_and
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_and
+
+
diff --git a/doc/fluid/api/tensor/logical_not.rst b/doc/fluid/api/tensor/logical_not.rst
new file mode 100644
index 0000000000000000000000000000000000000000..db8e64183c86bc959b5c44e21c17f573e124147b
--- /dev/null
+++ b/doc/fluid/api/tensor/logical_not.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_logical_not:
+
+logical_not
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_not
+
+
diff --git a/doc/fluid/api/tensor/logical_or.rst b/doc/fluid/api/tensor/logical_or.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ab0747e60aac6295255225dbeed16e08a8acc835
--- /dev/null
+++ b/doc/fluid/api/tensor/logical_or.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_logical_or:
+
+logical_or
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_or
+
+
diff --git a/doc/fluid/api/tensor/logical_xor.rst b/doc/fluid/api/tensor/logical_xor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..660c5cff8281c84e79410a6e058f68bc2003a463
--- /dev/null
+++ b/doc/fluid/api/tensor/logical_xor.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_logical_xor:
+
+logical_xor
+-------------------------------
+:doc_source: paddle.fluid.layers.logical_xor
+
+
diff --git a/doc/fluid/api/tensor/masked_select.rst b/doc/fluid/api/tensor/masked_select.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b3e0d11be9c38ba8e7459903e0198a8d1e39676d
--- /dev/null
+++ b/doc/fluid/api/tensor/masked_select.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_search_masked_select:
+
+masked_select
+---------------
+
+.. autofunction:: paddle.tensor.search.masked_select
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math.rst b/doc/fluid/api/tensor/math.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8afa8020941359f9391e81b4ab043a5467ffc516
--- /dev/null
+++ b/doc/fluid/api/tensor/math.rst
@@ -0,0 +1,32 @@
+====
+math
+====
+
+.. toctree::
+ :maxdepth: 1
+
+ math/add.rst
+ math/addcmul.rst
+ math/addmm.rst
+ math/atan.rst
+ math/clamp.rst
+ math/divide.rst
+ math/floor_divide.rst
+ math/remainder.rst
+ math/floor_mod.rst
+ math/mod.rst
+ math/elementwise_sum.rst
+ math/log1p.rst
+ math/logsumexp.rst
+ math/max.rst
+ math/min.rst
+ math/mm.rst
+ math/mul.rst
+ math/multiply.rst
+ math/pow.rst
+ math/prod.rst
+ math/sign.rst
+ math/sin.rst
+ math/sqrt.rst
+ math/sum.rst
+ math/tanh.rst
diff --git a/doc/fluid/api/tensor/math/add.rst b/doc/fluid/api/tensor/math/add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0b604ac2d1805066bfa24f8a12c8fbed36b14d0d
--- /dev/null
+++ b/doc/fluid/api/tensor/math/add.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_add:
+
+add
+---
+
+.. autofunction:: paddle.tensor.math.add
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/atan.rst b/doc/fluid/api/tensor/math/atan.rst
new file mode 100644
index 0000000000000000000000000000000000000000..31b11dbbe4fbc39d7f5c472478cea1544edfefe7
--- /dev/null
+++ b/doc/fluid/api/tensor/math/atan.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_atan:
+
+atan
+----
+
+.. autofunction:: paddle.tensor.math.atan
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/div.rst b/doc/fluid/api/tensor/math/div.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cf8397dbffc36f895319dced427487e7f3851d40
--- /dev/null
+++ b/doc/fluid/api/tensor/math/div.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_div:
+
+div
+---
+
+.. autofunction:: paddle.tensor.math.div
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/divide.rst b/doc/fluid/api/tensor/math/divide.rst
new file mode 100644
index 0000000000000000000000000000000000000000..db7c8aa3cd242d58f34e19845a5102b5a90c6b7e
--- /dev/null
+++ b/doc/fluid/api/tensor/math/divide.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_divide:
+
+divide
+------
+
+.. autofunction:: paddle.tensor.math.divide
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/elementwise_sum.rst b/doc/fluid/api/tensor/math/elementwise_sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..05acb3f78f66192b5eea938cdd528b56da247a22
--- /dev/null
+++ b/doc/fluid/api/tensor/math/elementwise_sum.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_elementwise_sum:
+
+elementwise_sum
+---------------
+
+.. autofunction:: paddle.tensor.math.elementwise_sum
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/floor_divide.rst b/doc/fluid/api/tensor/math/floor_divide.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aed75d9790babc2d0e7007f6df313f1987241e66
--- /dev/null
+++ b/doc/fluid/api/tensor/math/floor_divide.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_floor_divide:
+
+floor_divide
+------------
+
+.. autofunction:: paddle.tensor.math.floor_divide
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/floor_mod.rst b/doc/fluid/api/tensor/math/floor_mod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..655e419119b70e73cc8d68af42692923ae1b6e9c
--- /dev/null
+++ b/doc/fluid/api/tensor/math/floor_mod.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_floor_mod:
+
+floor_mod
+---------
+
+.. autofunction:: paddle.tensor.math.floor_mod
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/logsumexp.rst b/doc/fluid/api/tensor/math/logsumexp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..63a25475394863db763250e653ec1348d18e0726
--- /dev/null
+++ b/doc/fluid/api/tensor/math/logsumexp.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_logsumexp:
+
+logsumexp
+---------
+
+.. autofunction:: paddle.tensor.math.logsumexp
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/mm.rst b/doc/fluid/api/tensor/math/mm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8668c44055f25025ae080b3a08cce39919cd888b
--- /dev/null
+++ b/doc/fluid/api/tensor/math/mm.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_mm:
+
+mm
+--
+
+.. autofunction:: paddle.tensor.math.mm
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/mod.rst b/doc/fluid/api/tensor/math/mod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a5e207a6c43331c150050bc292c32de2db5f4243
--- /dev/null
+++ b/doc/fluid/api/tensor/math/mod.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_mod:
+
+mod
+---
+
+.. autofunction:: paddle.tensor.math.mod
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/mul.rst b/doc/fluid/api/tensor/math/mul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9b14559a4a35c701b8169cf425c1e8787db3561c
--- /dev/null
+++ b/doc/fluid/api/tensor/math/mul.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_mul:
+
+mul
+---
+
+.. autofunction:: paddle.tensor.math.mul
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/multiply.rst b/doc/fluid/api/tensor/math/multiply.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e483e0214e2caac2d38d489d9071be005f4cdb19
--- /dev/null
+++ b/doc/fluid/api/tensor/math/multiply.rst
@@ -0,0 +1,9 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+.. _api_tensor_math_multiply:
+
+multiply
+--------
+
+.. autofunction:: paddle.tensor.math.multiply
+ :noindex:
\ No newline at end of file
diff --git a/doc/fluid/api/tensor/math/pow.rst b/doc/fluid/api/tensor/math/pow.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d0da558dd738aee845ef01c14d62ba4f023e921
--- /dev/null
+++ b/doc/fluid/api/tensor/math/pow.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_pow:
+
+pow
+---
+
+.. autofunction:: paddle.tensor.math.pow
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/prod.rst b/doc/fluid/api/tensor/math/prod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b5ced4643775d9001501342ef852dfd38108d49d
--- /dev/null
+++ b/doc/fluid/api/tensor/math/prod.rst
@@ -0,0 +1,9 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+.. _api_tensor_math_prod:
+
+prod
+----
+
+.. autofunction:: paddle.tensor.math.prod
+ :noindex:
diff --git a/doc/fluid/api/tensor/math/remainder.rst b/doc/fluid/api/tensor/math/remainder.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d753fd1310ef994c83a46e06cfa8c4befd4ed88
--- /dev/null
+++ b/doc/fluid/api/tensor/math/remainder.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_remainder:
+
+remainder
+---------
+
+.. autofunction:: paddle.tensor.math.remainder
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/sign.rst b/doc/fluid/api/tensor/math/sign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5ee442aec6366911066c206e5e33c9a6f5e5e743
--- /dev/null
+++ b/doc/fluid/api/tensor/math/sign.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+.. _api_tensor_math_sign:
+
+sign
+------
+
+.. autofunction:: paddle.tensor.math.sign
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/sin.rst b/doc/fluid/api/tensor/math/sin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..862334131da6f38a853fcff1ed5860db625561ae
--- /dev/null
+++ b/doc/fluid/api/tensor/math/sin.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_sin:
+
+sin
+---
+
+.. autofunction:: paddle.tensor.math.sin
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/sqrt.rst b/doc/fluid/api/tensor/math/sqrt.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c0ad257993458844e18e514731f988b289a8e73f
--- /dev/null
+++ b/doc/fluid/api/tensor/math/sqrt.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_sqrt:
+
+sqrt
+----
+
+.. autofunction:: paddle.tensor.math.sqrt
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/sum.rst b/doc/fluid/api/tensor/math/sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8946b3aa5056818046793ac856962c6ee4e0d175
--- /dev/null
+++ b/doc/fluid/api/tensor/math/sum.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_sum:
+
+sum
+---
+
+.. autofunction:: paddle.tensor.math.sum
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/math/tanh.rst b/doc/fluid/api/tensor/math/tanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ceb5971e0c3e51565f9fedec4df881eab5f86396
--- /dev/null
+++ b/doc/fluid/api/tensor/math/tanh.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_math_tanh:
+
+tanh
+----
+
+.. autofunction:: paddle.tensor.math.tanh
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/max.rst b/doc/fluid/api/tensor/max.rst
new file mode 100644
index 0000000000000000000000000000000000000000..61a8667f8cab06a8433d9ab9e143390d3c1ccbc8
--- /dev/null
+++ b/doc/fluid/api/tensor/max.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_max:
+
+max
+-------------------------------
+:doc_source: paddle.tensor.max
+
+
diff --git a/doc/fluid/api/tensor/maximum.rst b/doc/fluid/api/tensor/maximum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7c91c5f2bd465a17ceae3a2f602addbd115ed273
--- /dev/null
+++ b/doc/fluid/api/tensor/maximum.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_maximum:
+
+maximum
+-------------------------------
+:doc_source: paddle.tensor.maximum
+
+
diff --git a/doc/fluid/api/tensor/mean.rst b/doc/fluid/api/tensor/mean.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d226a37107af8e67ef4d8ea0bf9a17e536fede36
--- /dev/null
+++ b/doc/fluid/api/tensor/mean.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_mean:
+
+mean
+---------
+
+.. autofunction:: paddle.tensor.mean
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/min.rst b/doc/fluid/api/tensor/min.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cdb8df5c370ce66a5e8e39555699e09730bdcf23
--- /dev/null
+++ b/doc/fluid/api/tensor/min.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_min:
+
+min
+-------------------------------
+:doc_source: paddle.tensor.min
+
+
diff --git a/doc/fluid/api/tensor/minimum.rst b/doc/fluid/api/tensor/minimum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..725aaeb8a7f2fa0cf7b1a7fa1d8611a4c4967ac7
--- /dev/null
+++ b/doc/fluid/api/tensor/minimum.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_minimum:
+
+minimum
+-------------------------------
+:doc_source: paddle.tensor.minimum
+
+
diff --git a/doc/fluid/api/tensor/mm.rst b/doc/fluid/api/tensor/mm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb11742e525dc371f542f7de2cdea442571891e6
--- /dev/null
+++ b/doc/fluid/api/tensor/mm.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_mm:
+
+mm
+-------------------------------
+:doc_source: paddle.fluid.layers.matmul
+
+
diff --git a/doc/fluid/api/tensor/mul.rst b/doc/fluid/api/tensor/mul.rst
new file mode 100644
index 0000000000000000000000000000000000000000..40af2fcfa1946fcd9040b11f22419960bc892736
--- /dev/null
+++ b/doc/fluid/api/tensor/mul.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_mul:
+
+mul
+-------------------------------
+:doc_source: paddle.fluid.layers.mul
+
+
diff --git a/doc/fluid/api/tensor/multiplex.rst b/doc/fluid/api/tensor/multiplex.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b66aa5287e3bde38007505e5395ff8e931c34c86
--- /dev/null
+++ b/doc/fluid/api/tensor/multiplex.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_multiplex:
+
+multiplex
+-------------------------------
+:doc_source: paddle.fluid.layers.multiplex
+
+
diff --git a/doc/fluid/api/tensor/norm.rst b/doc/fluid/api/tensor/norm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..453ea0b63674dd746c5903e11f495d64b20f89f9
--- /dev/null
+++ b/doc/fluid/api/tensor/norm.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_linalg_norm:
+
+norm
+------------
+
+.. autofunction:: paddle.tensor.linalg.norm
+ :noindex:
diff --git a/doc/fluid/api/tensor/not_equal.rst b/doc/fluid/api/tensor/not_equal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8aeac42d73c7683ba037bef31a6b68c2acf01064
--- /dev/null
+++ b/doc/fluid/api/tensor/not_equal.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_not_equal:
+
+not_equal
+-------------------------------
+:doc_source: paddle.tensor.not_equal
+
+
diff --git a/doc/fluid/api/tensor/numel.rst b/doc/fluid/api/tensor/numel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ea48ba7de7a96c302351636d0cd99e5a184c3a5e
--- /dev/null
+++ b/doc/fluid/api/tensor/numel.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_stat_numel:
+
+numel
+-------
+
+.. autofunction:: paddle.tensor.stat.numel
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/ones.rst b/doc/fluid/api/tensor/ones.rst
new file mode 100644
index 0000000000000000000000000000000000000000..150ab46d8795a56d60b014831c2f437562c3bc84
--- /dev/null
+++ b/doc/fluid/api/tensor/ones.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_ones:
+
+ones
+--------
+
+.. autofunction:: paddle.tensor.creation.ones
+ :noindex:
+
+
diff --git a/doc/fluid/api/tensor/ones_like.rst b/doc/fluid/api/tensor/ones_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..47ecd764f36b11d425863b0a09a111040aed31d2
--- /dev/null
+++ b/doc/fluid/api/tensor/ones_like.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_ones_like:
+
+ones_like
+---------
+
+.. autofunction:: paddle.tensor.ones_like
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/pow.rst b/doc/fluid/api/tensor/pow.rst
new file mode 100644
index 0000000000000000000000000000000000000000..875b26cda0f71e8bbfeda60fe31bb0673d282c4b
--- /dev/null
+++ b/doc/fluid/api/tensor/pow.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_pow:
+
+pow
+-------------------------------
+:doc_source: paddle.fluid.layers.pow
+
+
diff --git a/doc/fluid/api/tensor/random.rst b/doc/fluid/api/tensor/random.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b119e70bb3226a9bf24f5d831da1a6b685abbca5
--- /dev/null
+++ b/doc/fluid/api/tensor/random.rst
@@ -0,0 +1,14 @@
+======
+random
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ random/normal.rst
+ random/rand.rst
+ random/randint.rst
+ random/randn.rst
+ random/randperm.rst
+ random/standard_normal.rst
+ random/uniform.rst
diff --git a/doc/fluid/api/tensor/random/normal.rst b/doc/fluid/api/tensor/random/normal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1617ef9ba4b53424a37ce7972952a27c0b686740
--- /dev/null
+++ b/doc/fluid/api/tensor/random/normal.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_normal:
+
+normal
+------
+
+.. autofunction:: paddle.tensor.random.normal
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/rand.rst b/doc/fluid/api/tensor/random/rand.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7ad959e99d516a080f7e65f66eb385f6e4fe495b
--- /dev/null
+++ b/doc/fluid/api/tensor/random/rand.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_rand:
+
+rand
+----
+
+.. autofunction:: paddle.tensor.random.rand
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/randint.rst b/doc/fluid/api/tensor/random/randint.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e5a9d6139f425536ef05ab9ef28f3fba625ce7ad
--- /dev/null
+++ b/doc/fluid/api/tensor/random/randint.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_randint:
+
+randint
+-------
+
+.. autofunction:: paddle.tensor.random.randint
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/randn.rst b/doc/fluid/api/tensor/random/randn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d332aaa1441ff4634a5674bd4765ac0534ab39b
--- /dev/null
+++ b/doc/fluid/api/tensor/random/randn.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_randn:
+
+randn
+-----
+
+.. autofunction:: paddle.tensor.random.randn
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/randperm.rst b/doc/fluid/api/tensor/random/randperm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0aa4cc612db88a78abc8b3bb91fc347bed5b6f5a
--- /dev/null
+++ b/doc/fluid/api/tensor/random/randperm.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_randperm:
+
+randperm
+--------
+
+.. autofunction:: paddle.tensor.random.randperm
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/standard_normal.rst b/doc/fluid/api/tensor/random/standard_normal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a279a26c077ea05a62bacd3f709624ea5fc7438d
--- /dev/null
+++ b/doc/fluid/api/tensor/random/standard_normal.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_standard_normal:
+
+standard_normal
+---------------
+
+.. autofunction:: paddle.tensor.random.standard_normal
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/random/uniform.rst b/doc/fluid/api/tensor/random/uniform.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b323d371212dbbbe760267d4ec4551f339b69f52
--- /dev/null
+++ b/doc/fluid/api/tensor/random/uniform.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_random_uniform:
+
+uniform
+-------
+
+.. autofunction:: paddle.tensor.random.uniform
+ :noindex:
diff --git a/doc/fluid/api/tensor/rank.rst b/doc/fluid/api/tensor/rank.rst
new file mode 100644
index 0000000000000000000000000000000000000000..716f919d98e9b493e238f265ae1f256b3496dd48
--- /dev/null
+++ b/doc/fluid/api/tensor/rank.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_rank:
+
+rank
+-------------------------------
+:doc_source: paddle.fluid.layers.rank
+
+
diff --git a/doc/fluid/api/tensor/reciprocal.rst b/doc/fluid/api/tensor/reciprocal.rst
new file mode 100644
index 0000000000000000000000000000000000000000..42709596bf9fe2d9c2b10af99cf32653cf897d4f
--- /dev/null
+++ b/doc/fluid/api/tensor/reciprocal.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reciprocal:
+
+reciprocal
+-------------------------------
+:doc_source: paddle.fluid.layers.reciprocal
+
+
diff --git a/doc/fluid/api/tensor/reduce_all.rst b/doc/fluid/api/tensor/reduce_all.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cac7df54d7ec563433de7b7c8528e3513921e940
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_all.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_all:
+
+reduce_all
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_all
+
+
diff --git a/doc/fluid/api/tensor/reduce_any.rst b/doc/fluid/api/tensor/reduce_any.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e3aa0a7210f854332d166c8817913eccb376fcdc
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_any.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_any:
+
+reduce_any
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_any
+
+
diff --git a/doc/fluid/api/tensor/reduce_max.rst b/doc/fluid/api/tensor/reduce_max.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7c502ba298f14f72dace417485139146031ab5d4
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_max.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_max:
+
+reduce_max
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_max
+
+
diff --git a/doc/fluid/api/tensor/reduce_mean.rst b/doc/fluid/api/tensor/reduce_mean.rst
new file mode 100644
index 0000000000000000000000000000000000000000..475b86ff0ab1daeee7b0167631fd74b5421cb336
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_mean.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_mean:
+
+reduce_mean
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_mean
+
+
diff --git a/doc/fluid/api/tensor/reduce_min.rst b/doc/fluid/api/tensor/reduce_min.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9fdf51abf5312c097cce4b7363c7233bffb0257e
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_min.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_min:
+
+reduce_min
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_min
+
+
diff --git a/doc/fluid/api/tensor/reduce_prod.rst b/doc/fluid/api/tensor/reduce_prod.rst
new file mode 100644
index 0000000000000000000000000000000000000000..63ef33caecf23316de66fe9a2a44a6a7dec3b519
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_prod.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_prod:
+
+reduce_prod
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_prod
+
+
diff --git a/doc/fluid/api/tensor/reduce_sum.rst b/doc/fluid/api/tensor/reduce_sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ea9c036b9647aade517d8093cea5beeeac1b8d1a
--- /dev/null
+++ b/doc/fluid/api/tensor/reduce_sum.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reduce_sum:
+
+reduce_sum
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_sum
+
+
diff --git a/doc/fluid/api/tensor/reshape.rst b/doc/fluid/api/tensor/reshape.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c95f9bcc20d05863e8d4933e9f5f904838d63cc1
--- /dev/null
+++ b/doc/fluid/api/tensor/reshape.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reshape:
+
+reshape
+-------------------------------
+:doc_source: paddle.fluid.layers.reshape
+
+
diff --git a/doc/fluid/api/tensor/reverse.rst b/doc/fluid/api/tensor/reverse.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0a81ad4c30fadd77b7b68fa86dcad5ac443eb75a
--- /dev/null
+++ b/doc/fluid/api/tensor/reverse.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_reverse:
+
+reverse
+-------------------------------
+:doc_source: paddle.fluid.layers.reverse
+
+
diff --git a/doc/fluid/api/tensor/round.rst b/doc/fluid/api/tensor/round.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d333f6c39b1e8c740e3b018aa7e6e6c4d1a27519
--- /dev/null
+++ b/doc/fluid/api/tensor/round.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_round:
+
+round
+-------------------------------
+:doc_source: paddle.fluid.layers.round
+
+
diff --git a/doc/fluid/api/tensor/rsqrt.rst b/doc/fluid/api/tensor/rsqrt.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9e31b2fa26fb986708f82a4b88230abb97a675af
--- /dev/null
+++ b/doc/fluid/api/tensor/rsqrt.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_rsqrt:
+
+rsqrt
+-------------------------------
+:doc_source: paddle.fluid.layers.rsqrt
+
+
diff --git a/doc/fluid/api/tensor/save.rst b/doc/fluid/api/tensor/save.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5082778ea025339ef8da7c49a377b8b442359748
--- /dev/null
+++ b/doc/fluid/api/tensor/save.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.save
+
+
diff --git a/doc/fluid/api/tensor/scale.rst b/doc/fluid/api/tensor/scale.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8d30627b0d464f92e82c29ff5ccc3f3025c962d6
--- /dev/null
+++ b/doc/fluid/api/tensor/scale.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_scale:
+
+scale
+-------------------------------
+:doc_source: paddle.fluid.layers.scale
+
+
diff --git a/doc/fluid/api/tensor/scatter.rst b/doc/fluid/api/tensor/scatter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4083f11eedcce87ecfcee63632a72b6587eab088
--- /dev/null
+++ b/doc/fluid/api/tensor/scatter.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_scatter:
+
+scatter
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter
+
+
diff --git a/doc/fluid/api/tensor/scatter_nd.rst b/doc/fluid/api/tensor/scatter_nd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..101d51f148a7e86a9f33f49067def59f75c539fa
--- /dev/null
+++ b/doc/fluid/api/tensor/scatter_nd.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_scatter_nd:
+
+scatter_nd
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter_nd
+
+
diff --git a/doc/fluid/api/tensor/scatter_nd_add.rst b/doc/fluid/api/tensor/scatter_nd_add.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cc8ebaa65fab98c8a145b67c1d142fa1e0a5146c
--- /dev/null
+++ b/doc/fluid/api/tensor/scatter_nd_add.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_scatter_nd_add:
+
+scatter_nd_add
+-------------------------------
+:doc_source: paddle.fluid.layers.scatter_nd_add
+
+
diff --git a/doc/fluid/api/tensor/shape.rst b/doc/fluid/api/tensor/shape.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c141936284983737db47ba463a8fdee4476f0d06
--- /dev/null
+++ b/doc/fluid/api/tensor/shape.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_shape:
+
+shape
+-------------------------------
+:doc_source: paddle.fluid.layers.shape
+
+
diff --git a/doc/fluid/api/tensor/shard_index.rst b/doc/fluid/api/tensor/shard_index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..afee6c6c375ca5ee74857f96a6eead14ac65a979
--- /dev/null
+++ b/doc/fluid/api/tensor/shard_index.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_shard_index:
+
+shard_index
+-------------------------------
+:doc_source: paddle.fluid.layers.shard_index
+
+
diff --git a/doc/fluid/api/tensor/shuffle.rst b/doc/fluid/api/tensor/shuffle.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aaa48eb7df14d29fc50c126879c19bd94e9a5009
--- /dev/null
+++ b/doc/fluid/api/tensor/shuffle.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_shuffle:
+
+shuffle
+-------------------------------
+:doc_source: paddle.fluid.io.shuffle
+
+
diff --git a/doc/fluid/api/tensor/sign.rst b/doc/fluid/api/tensor/sign.rst
new file mode 100644
index 0000000000000000000000000000000000000000..683c09a70d229b44af43399e8648ba7454c101e2
--- /dev/null
+++ b/doc/fluid/api/tensor/sign.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sign:
+
+sign
+-------------------------------
+:doc_source: paddle.fluid.layers.sign
+
+
diff --git a/doc/fluid/api/tensor/sin.rst b/doc/fluid/api/tensor/sin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f0821d84f5c9b1695d68c45b78de99579e1808be
--- /dev/null
+++ b/doc/fluid/api/tensor/sin.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sin:
+
+sin
+-------------------------------
+:doc_source: paddle.fluid.layers.sin
+
+
diff --git a/doc/fluid/api/tensor/slice.rst b/doc/fluid/api/tensor/slice.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8b75aa4014df0f3bbc734b1d2b4c129f725bd517
--- /dev/null
+++ b/doc/fluid/api/tensor/slice.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_slice:
+
+slice
+-------------------------------
+:doc_source: paddle.fluid.layers.slice
+
+
diff --git a/doc/fluid/api/tensor/sort.rst b/doc/fluid/api/tensor/sort.rst
new file mode 100644
index 0000000000000000000000000000000000000000..21da4ab432d026f281b69183d95134f1fbadd553
--- /dev/null
+++ b/doc/fluid/api/tensor/sort.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sort:
+
+sort
+-------------------------------
+:doc_source: paddle.tensor.sort
+
+
diff --git a/doc/fluid/api/tensor/split.rst b/doc/fluid/api/tensor/split.rst
new file mode 100644
index 0000000000000000000000000000000000000000..104169ea5936841521df3f5f63d7342a2258ee95
--- /dev/null
+++ b/doc/fluid/api/tensor/split.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_split:
+
+split
+--------
+
+.. autofunction:: paddle.tensor.manipulation.split
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/sqrt.rst b/doc/fluid/api/tensor/sqrt.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aef16db387a68c43b4091f47e4b0d006311a6f06
--- /dev/null
+++ b/doc/fluid/api/tensor/sqrt.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sqrt:
+
+sqrt
+-------------------------------
+:doc_source: paddle.fluid.layers.sqrt
+
+
diff --git a/doc/fluid/api/tensor/square.rst b/doc/fluid/api/tensor/square.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0eb62226e460002fba6a586613bb0bdb50e12763
--- /dev/null
+++ b/doc/fluid/api/tensor/square.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_square:
+
+square
+-------------------------------
+:doc_source: paddle.fluid.layers.square
+
+
diff --git a/doc/fluid/api/tensor/squeeze.rst b/doc/fluid/api/tensor/squeeze.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ef11141080dfa633de50f7c9cc1d7307521fe34b
--- /dev/null
+++ b/doc/fluid/api/tensor/squeeze.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_squeeze:
+
+squeeze
+-------------------------------
+:doc_source: paddle.fluid.layers.squeeze
+
+
diff --git a/doc/fluid/api/tensor/stack.rst b/doc/fluid/api/tensor/stack.rst
new file mode 100644
index 0000000000000000000000000000000000000000..02e9d20f0ac6d85efd63348bf0eb6cb8f8ed74e0
--- /dev/null
+++ b/doc/fluid/api/tensor/stack.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_stack:
+
+stack
+-------------------------------
+:doc_source: paddle.fluid.layers.stack
+
+
diff --git a/doc/fluid/api/tensor/stanh.rst b/doc/fluid/api/tensor/stanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9450cf6f55fabaf8f0786472d6b3edcd591ff868
--- /dev/null
+++ b/doc/fluid/api/tensor/stanh.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_stanh:
+
+stanh
+-------------------------------
+:doc_source: paddle.fluid.layers.stanh
+
+
diff --git a/doc/fluid/api/tensor/std.rst b/doc/fluid/api/tensor/std.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d8db35c14dd6ad64f0dd23bbcc42b3e3101481d
--- /dev/null
+++ b/doc/fluid/api/tensor/std.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_std:
+
+std
+---------
+
+.. autofunction:: paddle.tensor.std
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/strided_slice.rst b/doc/fluid/api/tensor/strided_slice.rst
new file mode 100644
index 0000000000000000000000000000000000000000..249834f67728fc704919ed99f68d0847994efe07
--- /dev/null
+++ b/doc/fluid/api/tensor/strided_slice.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_strided_slice:
+
+strided_slice
+-------------------------------
+:doc_source: paddle.fluid.layers.strided_slice
+
+
diff --git a/doc/fluid/api/tensor/sum.rst b/doc/fluid/api/tensor/sum.rst
new file mode 100644
index 0000000000000000000000000000000000000000..234e723acab4ba4b86fdbf278280113a16340f28
--- /dev/null
+++ b/doc/fluid/api/tensor/sum.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sum:
+
+sum
+-------------------------------
+:doc_source: paddle.fluid.layers.reduce_sum
+
+
diff --git a/doc/fluid/api/tensor/sums.rst b/doc/fluid/api/tensor/sums.rst
new file mode 100644
index 0000000000000000000000000000000000000000..356c8c673a8a01523dad147eb19f6566e01da999
--- /dev/null
+++ b/doc/fluid/api/tensor/sums.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_sums:
+
+sums
+-------------------------------
+:doc_source: paddle.fluid.layers.sums
+
+
diff --git a/doc/fluid/api/tensor/tanh.rst b/doc/fluid/api/tensor/tanh.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3523aaf1577e80cf64d6d0dfc8b251a01de0875f
--- /dev/null
+++ b/doc/fluid/api/tensor/tanh.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_tanh:
+
+tanh
+-------------------------------
+:doc_source: paddle.fluid.layers.tanh
+
+
diff --git a/doc/fluid/api/tensor/topk.rst b/doc/fluid/api/tensor/topk.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33a36eb2426a6d0186cfa067c71c293e4296674c
--- /dev/null
+++ b/doc/fluid/api/tensor/topk.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_topk:
+
+topk
+-------------------------------
+:doc_source: paddle.tensor.topk
+
+
diff --git a/doc/fluid/api/tensor/transpose.rst b/doc/fluid/api/tensor/transpose.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5c4ea641482241c3dcfe411ac1996bb40ce20faf
--- /dev/null
+++ b/doc/fluid/api/tensor/transpose.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_transpose:
+
+transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.transpose
+
+
diff --git a/doc/fluid/api/tensor/unique.rst b/doc/fluid/api/tensor/unique.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fe8be883a3f7537cb01815ec978455f5908ea355
--- /dev/null
+++ b/doc/fluid/api/tensor/unique.rst
@@ -0,0 +1,10 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_manipulation_unique:
+
+unique
+--------
+
+.. autofunction:: paddle.tensor.manipulation.unique
+
diff --git a/doc/fluid/api/tensor/unique_with_counts.rst b/doc/fluid/api/tensor/unique_with_counts.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3416604445f0a6125eb980e7a1a26501566fd3f3
--- /dev/null
+++ b/doc/fluid/api/tensor/unique_with_counts.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_unique_with_counts:
+
+unique_with_counts
+-------------------------------
+:doc_source: paddle.fluid.layers.unique_with_counts
+
+
diff --git a/doc/fluid/api/tensor/unsqueeze.rst b/doc/fluid/api/tensor/unsqueeze.rst
new file mode 100644
index 0000000000000000000000000000000000000000..229e923a596e7b5e6b2a103708877483f02c87f9
--- /dev/null
+++ b/doc/fluid/api/tensor/unsqueeze.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_unsqueeze:
+
+unsqueeze
+-------------------------------
+:doc_source: paddle.fluid.layers.unsqueeze
+
+
diff --git a/doc/fluid/api/tensor/unstack.rst b/doc/fluid/api/tensor/unstack.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e909f6e14b1a24098871e7a07f772badf9787bea
--- /dev/null
+++ b/doc/fluid/api/tensor/unstack.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_unstack:
+
+unstack
+-------------------------------
+:doc_source: paddle.fluid.layers.unstack
+
+
diff --git a/doc/fluid/api/tensor/var.rst b/doc/fluid/api/tensor/var.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4a617dec382b6e4e1d1926987500fec8f3e1c7cd
--- /dev/null
+++ b/doc/fluid/api/tensor/var.rst
@@ -0,0 +1,11 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_var:
+
+var
+---------
+
+.. autofunction:: paddle.tensor.var
+ :noindex:
+
diff --git a/doc/fluid/api/tensor/where.rst b/doc/fluid/api/tensor/where.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5f306dffd90746d631062d5804fa8f79635e287
--- /dev/null
+++ b/doc/fluid/api/tensor/where.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_where:
+
+where
+-------------------------------
+:doc_source: paddle.fluid.layers.cond
+
+
diff --git a/doc/fluid/api/tensor/zeros.rst b/doc/fluid/api/tensor/zeros.rst
new file mode 100644
index 0000000000000000000000000000000000000000..19d4fb7a2f8dee04db30a920fc6f2b4153eae321
--- /dev/null
+++ b/doc/fluid/api/tensor/zeros.rst
@@ -0,0 +1,12 @@
+.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
+ !DO NOT EDIT THIS FILE MANUALLY!
+
+.. _api_tensor_creation_zeros:
+
+zeros
+--------
+
+.. autofunction:: paddle.tensor.creation.zeros
+ :noindex:
+
+
diff --git a/doc/fluid/api/tensor/zeros_like.rst b/doc/fluid/api/tensor/zeros_like.rst
new file mode 100644
index 0000000000000000000000000000000000000000..150f4ad84abd85db34457d76264a2daa9e863272
--- /dev/null
+++ b/doc/fluid/api/tensor/zeros_like.rst
@@ -0,0 +1,7 @@
+.. _api_tensor_cn_zeros_like:
+
+zeros_like
+-------------------------------
+:doc_source: paddle.fluid.layers.zeros_like
+
+
diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst
index 2492b98136f85ccb49922c08b096be4f7eb96d7d..28905bd06b502b30df36f03d6aea8c1295eef02f 100644
--- a/doc/fluid/api/transpiler.rst
+++ b/doc/fluid/api/transpiler.rst
@@ -10,4 +10,3 @@ fluid.transpiler
transpiler/HashName.rst
transpiler/memory_optimize.rst
transpiler/release_memory.rst
- transpiler/RoundRobin.rst
diff --git a/doc/fluid/api/transpiler/RoundRobin.rst b/doc/fluid/api/transpiler/RoundRobin.rst
deleted file mode 100644
index 547757d20e8388b3ea51b52a0b4c9e23116f0645..0000000000000000000000000000000000000000
--- a/doc/fluid/api/transpiler/RoundRobin.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
- !DO NOT EDIT THIS FILE MANUALLY!
-
-.. _api_fluid_transpiler_RoundRobin:
-
-RoundRobin
-----------
-
-.. autoclass:: paddle.fluid.transpiler.RoundRobin
- :members:
- :inherited-members:
- :noindex:
-
diff --git a/doc/fluid/api_cn/api_tree_cn.rst b/doc/fluid/api_cn/api_tree_cn.rst
index 3abcd3a1865e465afa80bf7fb3ab0807d3e1d94d..4625348ff97fd825bd85dd4bf9eb82649e637028 100644
--- a/doc/fluid/api_cn/api_tree_cn.rst
+++ b/doc/fluid/api_cn/api_tree_cn.rst
@@ -13,7 +13,9 @@ API接口
data/dataset_cn.rst
data_feeder_cn.rst
dataset_cn.rst
+ distributed_cn.rst
dygraph_cn.rst
+ static_cn.rst
executor_cn.rst
initializer_cn.rst
io_cn.rst
diff --git a/doc/fluid/api_cn/backward_cn/append_backward_cn.rst b/doc/fluid/api_cn/backward_cn/append_backward_cn.rst
index 1019bab98b6e6a667c97f5f6bf926b5026023fac..b3aa98f93e501d66687689c47c611677b4d7673e 100644
--- a/doc/fluid/api_cn/backward_cn/append_backward_cn.rst
+++ b/doc/fluid/api_cn/backward_cn/append_backward_cn.rst
@@ -3,10 +3,13 @@
append_backward
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.backward.append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口将向主程序(``main_program``)追加反向部分 。
完整的神经网络训练由前向和反向传播组成。但是当我们配置网络时,我们只需要指定其前向部分。
diff --git a/doc/fluid/api_cn/backward_cn/gradients_cn.rst b/doc/fluid/api_cn/backward_cn/gradients_cn.rst
index 5bb45f2074c9202798033d9813cdcaccdc6f2244..0165f7f2f3d0bc61bde4b08c3b79ddf1fee4e4ba 100644
--- a/doc/fluid/api_cn/backward_cn/gradients_cn.rst
+++ b/doc/fluid/api_cn/backward_cn/gradients_cn.rst
@@ -3,10 +3,13 @@
gradients
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.backward.gradients(targets, inputs, target_gradients=None, no_grad_set=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
将目标梯度反向传播到输入。
参数:
diff --git a/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst b/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst
index cce7037769c731b028d4645a5fe514fa576b0311..c62a90742856f5bd867e7548d432d818105a488e 100644
--- a/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/ErrorClipByValue_cn.rst
@@ -1,47 +1,50 @@
-.. _cn_api_fluid_clip_ErrorClipByValue:
-
-ErrorClipByValue
--------------------------------
-
-.. py:class:: paddle.fluid.clip.ErrorClipByValue(max, min=None)
-
-给定一个 Tensor ``t`` (该 Tensor 传入方式见代码示例),对 Tensor 中的元素超出给定最大 ``max`` 和最小界 ``min`` 内区间范围 [min, max] 的元素,重设为所超出界的界值。
-
-
-- 任何小于min(最小值)的值都被设置为 ``min``
-
-- 任何大于max(最大值)的值都被设置为 ``max``
-
-
-参数:
- - **max** (foat) - 要修剪的最大值。
- - **min** (float) - 要修剪的最小值。如果用户没有设置,将被框架默认设置为 ``-max`` 。
-
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
-
- BATCH_SIZE = 128
- CLIP_MAX = 2e-6
- CLIP_MIN = -1e-6
- prog = fluid.framework.Program()
-
- with fluid.program_guard(main_program=prog):
- image = fluid.layers.data(name='x', shape=[784], dtype='float32')
- hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
- hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
- predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
- label = fluid.layers.data(name='y', shape=[1], dtype='int64')
- cost = fluid.layers.cross_entropy(input=predict, label=label)
- avg_cost = fluid.layers.mean(cost)
- prog_clip = prog.clone()
- prog_clip.block(0).var(hidden1.name)._set_error_clip(
- fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN))
-
-
-
-
-
+.. _cn_api_fluid_clip_ErrorClipByValue:
+
+ErrorClipByValue
+-------------------------------
+
+.. py:class:: paddle.fluid.clip.ErrorClipByValue(max, min=None)
+
+
+
+
+给定一个 Tensor ``t`` (该 Tensor 传入方式见代码示例),对 Tensor 中的元素超出给定最大 ``max`` 和最小界 ``min`` 内区间范围 [min, max] 的元素,重设为所超出界的界值。
+
+
+- 任何小于min(最小值)的值都被设置为 ``min``
+
+- 任何大于max(最大值)的值都被设置为 ``max``
+
+
+参数:
+ - **max** (float) - 要修剪的最大值。
+ - **min** (float) - 要修剪的最小值。如果用户没有设置,将被框架默认设置为 ``-max`` 。
+
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ BATCH_SIZE = 128
+ CLIP_MAX = 2e-6
+ CLIP_MIN = -1e-6
+ prog = fluid.framework.Program()
+
+ with fluid.program_guard(main_program=prog):
+ image = fluid.layers.data(name='x', shape=[784], dtype='float32')
+ hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
+ hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
+ predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
+ label = fluid.layers.data(name='y', shape=[1], dtype='int64')
+ cost = fluid.layers.cross_entropy(input=predict, label=label)
+ avg_cost = fluid.layers.mean(cost)
+ prog_clip = prog.clone()
+ prog_clip.block(0).var(hidden1.name)._set_error_clip(
+ fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN))
+
+
+
+
+
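The rule described in the added ErrorClipByValue documentation above is an element-wise clamp of ``t`` to ``[min, max]``. A minimal NumPy sketch of that rule alone (not of the Fluid pass that applies it), reusing the CLIP_MAX/CLIP_MIN values from the example:

.. code-block:: python

    import numpy as np

    CLIP_MAX = 2e-6
    CLIP_MIN = -1e-6

    t = np.array([-5e-6, 0.0, 1e-6, 3e-6])
    # values below min are set to min, values above max are set to max
    clipped = np.clip(t, CLIP_MIN, CLIP_MAX)
    print(clipped)  # -> -1e-06, 0.0, 1e-06, 2e-06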
diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
index 959c75d56bd36cbead37c58fc0a7cf4f344ac9e7..4b714c9c42f52a525902c836f166d54b78e3f318 100644
--- a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
@@ -1,87 +1,107 @@
-.. _cn_api_fluid_clip_GradientClipByGlobalNorm:
-
-GradientClipByGlobalNorm
--------------------------------
-
-.. py:class:: paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm, group_name='default_group')
-
-通过多个 Tensor 的范数之和的比率,来剪切(clip)多个 Tensor ( Tensor 不是从该类传入, 通过 ``fluid.program_guard`` 的 ``main_program`` 参数传入,即公式中的 :math:`t\_list` 见代码实例)。
-
-给定一个 Tensor 列表 :math:`t\_list` 和一个剪切比率 ``clip_norm`` ,返回该类的实例作为 ``set_gradient_clip`` 方法的第一个参数, ``set_gradient_clip`` 第二个参数是用来计算被剪切的 Tensor 列表(该值默认为 ``None`` 会基于所有 Tensor 列表来计算全局范数 ``global_norm`` 。
-
-剪切过程如下:
-
-.. math::
- \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\
-
-其中:
-
-.. math::
- \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\
-
-
-参数:
- - **clip_norm** (float) - 范数最大值
- - **group_name** (str, optional) - 剪切的组名
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import paddle.fluid.core as core
- import paddle
-
- place = core.CPUPlace()
- prog = fluid.framework.Program()
- startup_program = fluid.framework.Program()
- with fluid.program_guard(
- main_program=prog, startup_program=startup_program):
- image = fluid.layers.data(name='x', shape=[784], dtype='float32')
- label = fluid.layers.data(name='y', shape=[1], dtype='int64')
- hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
- hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
- predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
- cost = fluid.layers.cross_entropy(input=predict, label=label)
- avg_cost = fluid.layers.mean(cost)
-
- prog_clip = prog.clone()
- avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
-
- p_g = fluid.backward.append_backward(loss=avg_cost)
- p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)
-
- with fluid.program_guard(main_program=prog_clip, startup_program=startup_program):
- fluid.clip.set_gradient_clip(
- fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0))
- p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)
-
- grad_list = [elem[1] for elem in p_g]
- grad_clip_list = [elem[1] for elem in p_g_clip]
-
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.mnist.train(), buf_size=8192),
- batch_size=128)
-
- exe = fluid.Executor(place)
- feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
- exe.run(startup_program)
-
- count = 0
- for data in train_reader():
- count += 1
- print("count:%s" % count)
- if count > 5:
- break
- out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
- out_clip = exe.run(prog_clip,
- feed=feeder.feed(data),
- fetch_list=grad_clip_list)
-
-
-
-
-
-
-
+.. _cn_api_fluid_clip_GradientClipByGlobalNorm:
+
+GradientClipByGlobalNorm
+-------------------------------
+
+.. py:class:: paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm, group_name='default_group', need_clip=None)
+
+:alias_main: paddle.nn.GradientClipByGlobalNorm
+:alias: paddle.nn.GradientClipByGlobalNorm,paddle.nn.clip.GradientClipByGlobalNorm
+:old_api: paddle.fluid.clip.GradientClipByGlobalNorm
+
+
+
+将一个 Tensor列表 :math:`t\_list` 中所有Tensor的L2范数之和,限定在 ``clip_norm`` 范围内。
+
+- 如果范数之和大于 ``clip_norm`` ,则所有 Tensor 会乘以一个系数进行压缩
+
+- 如果范数之和小于或等于 ``clip_norm`` ,则不会进行任何操作。
+
+输入的 Tensor列表 不是从该类里传入, 而是默认会选择 ``Program`` 中全部的梯度,如果 ``need_clip`` 不为None,则可以只选择部分参数进行梯度裁剪。
+
+该类需要在初始化 ``optimizer`` 时进行设置后才能生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。
+
+裁剪公式如下:
+
+.. math::
+ \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\
+
+其中:
+
+.. math::
+ \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\
+
+
+参数:
+ - **clip_norm** (float) - 所允许的范数最大值
+ - **group_name** (str, optional) - 剪切的组名
+ - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。
+
+**代码示例1:静态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ main_prog = fluid.Program()
+ startup_prog = fluid.Program()
+ with fluid.program_guard(
+ main_program=main_prog, startup_program=startup_prog):
+ image = fluid.data(
+ name='x', shape=[-1, 2], dtype='float32')
+ predict = fluid.layers.fc(input=image, size=3, act='relu') #Trainable parameters: fc_0.w.0, fc_0.b.0
+ loss = fluid.layers.mean(predict)
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
+
+ # 仅裁剪参数fc_0.w_0时:
+ # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为Parameter,返回类型为bool
+ # def filter_func(Parameter):
+ # # 可以较为方便的通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0)
+ # return Parameter.name=="fc_0.w_0"
+ # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
+ sgd_optimizer.minimize(loss)
+
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
+ exe.run(startup_prog)
+ out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
+
+
+**代码示例2:动态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+
+ with fluid.dygraph.guard():
+ linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w.0, linear_0.b.0
+ inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
+ out = linear(fluid.dygraph.to_variable(inputs))
+ loss = fluid.layers.reduce_mean(out)
+ loss.backward()
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
+
+ # 仅裁剪参数linear_0.w_0时:
+ # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为ParamBase,返回类型为bool
+ # def filter_func(ParamBase):
+ # # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0)
+ # return ParamBase.name == "linear_0.w_0"
+ # # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断
+ # return ParamBase.name == linear.weight.name
+ # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGD(
+ learning_rate=0.1,
+ parameter_list=linear.parameters(),
+ grad_clip=clip)
+ sgd_optimizer.minimize(loss)
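A minimal runnable sketch that uncomments the ``need_clip`` filter shown in the static-graph example above; the helper ``filter_func`` (and its ``param`` argument name) and the reliance on the default parameter name ``fc_0.w_0`` are illustrative assumptions taken from those comments:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_program=main_prog, startup_program=startup_prog):
        image = fluid.data(name='x', shape=[-1, 2], dtype='float32')
        predict = fluid.layers.fc(input=image, size=3, act='relu')  # trainable parameters: fc_0.w_0, fc_0.b_0
        loss = fluid.layers.mean(predict)

        # need_clip receives a Parameter and returns bool; here only fc_0.w_0 gets clipped
        def filter_func(param):
            return param.name == "fc_0.w_0"

        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=filter_func)
        sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
        sgd_optimizer.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
    out = exe.run(main_prog, feed={'x': x}, fetch_list=[loss])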
diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst
index d3eeb280eb89bdc30dcb681bee30316c9c6975a0..07199a3a60f6683fc06d4b7e4253f2ac58d84bdd 100644
--- a/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/GradientClipByNorm_cn.rst
@@ -1,79 +1,111 @@
-.. _cn_api_fluid_clip_GradientClipByNorm:
-
-GradientClipByNorm
--------------------------------
-
-.. py:class:: paddle.fluid.clip.GradientClipByNorm(clip_norm)
-
-将输入多维Tensor :math:`X` 转换为L2范数不超过给定的二范数最大值( ``clip_norm`` )的多维Tensor。(多维Tensor不是从该类传入, 而是通过 ``fluid.program_guard`` 的 ``main_program`` 参数传入)。
-
-该类限制了输入多维Tensor :math:`X` 的L2范数不会超过 ``clip_norm`` 。
-
-.. math::
-
- Out=
- \left\{
- \begin{aligned}
- & X & & if (norm(X) \leq clip\_norm)\\
- & \frac{clip\_norm∗X}{norm(X)} & & if (norm(X) > clip\_norm) \\
- \end{aligned}
- \right.
-
-
-其中 :math:`norm(X)` 代表 :math:`X` 的L2范数
-
-.. math::
- \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\
-
-参数:
- - **clip_norm** (float) - 二范数最大值
-
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import paddle.fluid.core as core
- import paddle
- place = core.CPUPlace()
- prog = fluid.framework.Program()
- startup_program = fluid.framework.Program()
- with fluid.program_guard(
- main_program=prog, startup_program=startup_program):
- image = fluid.layers.data(name='x', shape=[784], dtype='float32')
- label = fluid.layers.data(name='y', shape=[1], dtype='int64')
- hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
- hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
- predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
- cost = fluid.layers.cross_entropy(input=predict, label=label)
- avg_cost = fluid.layers.mean(cost)
- prog_clip = prog.clone()
- avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
- p_g = fluid.backward.append_backward(loss=avg_cost)
- p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)
- with fluid.program_guard(main_program=prog_clip, startup_program=startup_program):
- fluid.clip.set_gradient_clip(
- fluid.clip.GradientClipByNorm(clip_norm=2.0))
- p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)
- grad_list = [elem[1] for elem in p_g]
- grad_clip_list = [elem[1] for elem in p_g_clip]
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.mnist.train(), buf_size=8192),
- batch_size=128)
-
- exe = fluid.Executor(place)
- feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
- exe.run(startup_program)
-
- count = 0
- for data in train_reader():
- count += 1
- print("count:%s" % count)
- if count > 5:
- break
- out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
- out_clip = exe.run(prog_clip,
- feed=feeder.feed(data),
- fetch_list=grad_clip_list)
+.. _cn_api_fluid_clip_GradientClipByNorm:
+
+GradientClipByNorm
+-------------------------------
+
+.. py:class:: paddle.fluid.clip.GradientClipByNorm(clip_norm, need_clip=None)
+
+:alias_main: paddle.nn.GradientClipByNorm
+:alias: paddle.nn.GradientClipByNorm,paddle.nn.clip.GradientClipByNorm
+:old_api: paddle.fluid.clip.GradientClipByNorm
+
+
+
+将输入的多维Tensor :math:`X` 的L2范数限制在 ``clip_norm`` 范围之内。
+
+- 如果L2范数大于 ``clip_norm`` ,则该 Tensor 会乘以一个系数进行压缩。
+
+- 如果L2范数小于或等于 ``clip_norm`` ,则不会进行任何操作。
+
+需要裁剪的 Tensor 不是通过该类直接传入,默认会对 ``Program`` 中全部参数的梯度进行裁剪;如果 ``need_clip`` 不为None,则可以只对部分参数进行梯度裁剪。
+
+该类需要在初始化 ``optimizer`` 时进行设置,之后才会生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。
+
+裁剪公式如下:
+
+.. math::
+
+ Out=
+ \left\{
+ \begin{aligned}
+ & X & & if (norm(X) \leq clip\_norm)\\
+ & \frac{clip\_norm∗X}{norm(X)} & & if (norm(X) > clip\_norm) \\
+ \end{aligned}
+ \right.
+
+
+其中 :math:`norm(X)` 代表 :math:`X` 的L2范数
+
+.. math::
+ \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\
+
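+例如,下面用 NumPy 给出上述裁剪公式的一个简要数值示意(仅用于说明公式含义,并非该类的实际实现):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def clip_by_norm(x, clip_norm):
+        # 按上面的公式:若 norm(X) <= clip_norm 则原样返回,否则整体按比例缩放
+        norm = np.sqrt(np.sum(np.square(x)))
+        if norm <= clip_norm:
+            return x
+        return clip_norm * x / norm
+
+    x = np.array([3.0, 4.0], dtype='float32')    # norm(X) = 5.0
+    print(clip_by_norm(x, clip_norm=1.0))        # 输出约为 [0.6 0.8],其L2范数为1.0
+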
+参数:
+ - **clip_norm** (float) - 所允许的二范数最大值。
+ - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。
+
+**代码示例1:静态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ main_prog = fluid.Program()
+ startup_prog = fluid.Program()
+ with fluid.program_guard(
+ main_program=main_prog, startup_program=startup_prog):
+ image = fluid.data(
+ name='x', shape=[-1, 2], dtype='float32')
+        predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w_0, fc_0.b_0
+ loss = fluid.layers.mean(predict)
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByNorm(clip_norm=1.0)
+
+ # 仅裁剪参数fc_0.w_0时:
+        # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为Parameter,返回类型为bool
+        # def filter_func(Parameter):
+        #     # 可以较为方便地通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0)
+        #     return Parameter.name == "fc_0.w_0"
+        # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
+ sgd_optimizer.minimize(loss)
+
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
+ exe.run(startup_prog)
+ out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
+
+
+**代码示例2:动态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+
+ with fluid.dygraph.guard():
+        linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w_0, linear_0.b_0
+ inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
+ out = linear(fluid.dygraph.to_variable(inputs))
+ loss = fluid.layers.reduce_mean(out)
+ loss.backward()
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByNorm(clip_norm=1.0)
+
+ # 仅裁剪参数linear_0.w_0时:
+        # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为ParamBase,返回类型为bool
+        # def filter_func(ParamBase):
+        #     # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0)
+        #     return ParamBase.name == "linear_0.w_0"
+        #     # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断
+        #     return ParamBase.name == linear.weight.name
+        # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGD(
+ learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
+ sgd_optimizer.minimize(loss)
\ No newline at end of file
diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst
index 236459f71277f9ea4fe80ba2a4bd9e3e98a33e0a..58e218cb9888a49d06bbfebdddce3a1506f5fc76 100644
--- a/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/GradientClipByValue_cn.rst
@@ -1,40 +1,100 @@
-.. _cn_api_fluid_clip_GradientClipByValue:
-
-GradientClipByValue
--------------------------------
-
-.. py:class:: paddle.fluid.clip.GradientClipByValue(max, min=None)
-
-将梯度值(gradient values)的范围压缩到 [min, max]。
-
-
-给定一个 Tensor ``t`` ,该操作将它的值压缩到 ``min`` 和 ``max`` 之间
-
-- 任何小于 ``min`` 的值都被设置为 ``min``
-
-- 任何大于 ``max`` 的值都被设置为 ``max``
-
-参数:
- - **max** (foat) - 要修剪的最大值。
- - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被 ``framework`` 设置为 ``-max`` 。
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- w_param_attrs = fluid.ParamAttr(name=None,
- initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
- learning_rate=1.0,
- regularizer=fluid.regularizer.L1Decay(1.0),
- trainable=True,
- gradient_clip=fluid.clip.GradientClipByValue(-1.0, 1.0))
- x = fluid.layers.data(name='x', shape=[10], dtype='float32')
- y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs)
-
-
-
-
-
-
-
+.. _cn_api_fluid_clip_GradientClipByValue:
+
+GradientClipByValue
+-------------------------------
+
+.. py:class:: paddle.fluid.clip.GradientClipByValue(max, min=None, need_clip=None)
+
+:alias_main: paddle.nn.GradientClipByValue
+:alias: paddle.nn.GradientClipByValue,paddle.nn.clip.GradientClipByValue
+:old_api: paddle.fluid.clip.GradientClipByValue
+
+
+
+
+将输入的多维Tensor :math:`X` 的值限制在 [min, max] 范围内。
+
+需要裁剪的 Tensor 不是通过该类直接传入,默认会对 ``Program`` 中全部参数的梯度进行裁剪;如果 ``need_clip`` 不为None,则可以只对部分参数进行梯度裁剪。
+
+该类需要在初始化 ``optimizer`` 时进行设置,之后才会生效,可参看 ``optimizer`` 文档(例如: :ref:`cn_api_fluid_optimizer_SGDOptimizer` )。
+
+给定一个 Tensor ``t`` ,该操作将它的值压缩到 ``min`` 和 ``max`` 之间
+
+- 任何小于 ``min`` 的值都被设置为 ``min``
+
+- 任何大于 ``max`` 的值都被设置为 ``max``
+
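+例如,下面用 NumPy 给出按值裁剪规则的一个简要示意(仅用于说明裁剪规则,并非该类的实际实现;其中 min 未设置时按文档默认取 ``-max`` ):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def clip_by_value(x, max, min=None):
+        # min 为 None 时默认取 -max(要求 max > 0)
+        if min is None:
+            min = -max
+        return np.clip(x, min, max)
+
+    x = np.array([-3.0, 0.5, 2.0], dtype='float32')
+    print(clip_by_value(x, max=1.0))    # 输出为 [-1.   0.5  1. ]
+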
+参数:
+    - **max** (float) - 要修剪的最大值。
+    - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被自动设置为 ``-max`` (此时 ``max`` 必须大于0)。
+    - **need_clip** (function, optional) - 类型: 函数。用于指定需要梯度裁剪的参数,该函数接收一个 ``Parameter`` ,返回一个 ``bool`` (True表示需要裁剪,False不需要裁剪)。默认为None,此时会裁剪网络中全部参数。
+
+**代码示例1:静态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ main_prog = fluid.Program()
+ startup_prog = fluid.Program()
+ with fluid.program_guard(
+ main_program=main_prog, startup_program=startup_prog):
+ image = fluid.data(
+ name='x', shape=[-1, 2], dtype='float32')
+        predict = fluid.layers.fc(input=image, size=3, act='relu') #可训练参数: fc_0.w_0, fc_0.b_0
+ loss = fluid.layers.mean(predict)
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByValue(min=-1, max=1)
+
+ # 仅裁剪参数fc_0.w_0时:
+        # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为Parameter,返回类型为bool
+        # def filter_func(Parameter):
+        #     # 可以较为方便地通过Parameter.name判断(name可以在fluid.ParamAttr中设置,默认为fc_0.w_0、fc_0.b_0)
+        #     return Parameter.name == "fc_0.w_0"
+        # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
+ sgd_optimizer.minimize(loss)
+
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
+ exe.run(startup_prog)
+ out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
+
+
+**代码示例2:动态图**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+
+ with fluid.dygraph.guard():
+        linear = fluid.dygraph.Linear(10, 10) #可训练参数: linear_0.w_0, linear_0.b_0
+ inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
+ out = linear(fluid.dygraph.to_variable(inputs))
+ loss = fluid.layers.reduce_mean(out)
+ loss.backward()
+
+ # 裁剪网络中全部参数:
+ clip = fluid.clip.GradientClipByValue(min=-1, max=1)
+
+ # 仅裁剪参数linear_0.w_0时:
+        # 为need_clip参数传入一个函数filter_func,filter_func接收参数的类型为ParamBase,返回类型为bool
+        # def filter_func(ParamBase):
+        #     # 可以通过ParamBase.name判断(name可以在fluid.ParamAttr中设置,默认为linear_0.w_0、linear_0.b_0)
+        #     return ParamBase.name == "linear_0.w_0"
+        #     # 注:linear.weight、linear.bias能分别返回dygraph.Linear层的权重与偏差,也可以此来判断
+        #     return ParamBase.name == linear.weight.name
+        # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=filter_func)
+
+ sgd_optimizer = fluid.optimizer.SGD(
+ learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
+ sgd_optimizer.minimize(loss)
+
+
+
diff --git a/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst b/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst
index f27853f781f873d188be77ac57e90a9f6352e297..eae01c0c3f58774f874caf19785444c7dae3df64 100644
--- a/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/set_gradient_clip_cn.rst
@@ -3,16 +3,24 @@
set_gradient_clip
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.clip.set_gradient_clip(clip, param_list=None, program=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
+.. warning::
+    此API对使用位置的要求较高,其必须位于组建网络之后、 ``minimize`` 之前,因此在未来版本中可能被删除,故不推荐使用。推荐在初始化 ``optimizer`` 时设置梯度裁剪。
+ 有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。
+ 如果在 ``optimizer`` 中设置过梯度裁剪,又使用了 ``set_gradient_clip`` ,``set_gradient_clip`` 将不会生效。
+
给指定参数做梯度裁剪。
参数:
- - **clip** (BaseGradientClipAttr) - BaseGradientClipAttr子类的实例,如 :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 等,用于描述具体的裁剪方法和属性。
+ - **clip** (GradientClipBase) - 梯度裁剪的策略,如 :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 等,用于描述具体的裁剪方法和属性。
- **param_list** (list(Variable),可选) - 需要裁剪的参数列表,可以是参数或参数名称列表。默认值为None,表示裁剪 ``program`` 中的所有参数。
- - **program** (Program,可选) - 参数所在的Program。默认值为None,表示使用 :ref:`cn_api_fluid_default_main_program`。
+ - **program** (Program,可选) - 参数所在的Program。默认值为None,表示使用 :ref:`cn_api_fluid_default_main_program` 。
返回: 无。
@@ -59,3 +67,18 @@ set_gradient_clip
param_list=[param_var1, param_var2])
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
+
+ # network 4: use set_gradient_clip and minimize(grad_clip=clip) together
+ with fluid.program_guard(fluid.Program(), fluid.Program()):
+ loss = network()
+ param_var1 = fluid.default_main_program().global_block().var("fc1_param")
+ param_var2 = fluid.default_main_program().global_block().var("fc2_param")
+ clip1 = fluid.clip.GradientClipByValue(min=-1.0, max=1.0)
+ clip2 = fluid.clip.GradientClipByNorm(clip_norm=1.0)
+ # 设置梯度裁剪策略:clip1
+ fluid.clip.set_gradient_clip(clip1)
+
+ # 设置梯度裁剪策略:clip2
+ sgd = fluid.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2)
+ sgd.minimize(loss)
+ # 有设置冲突时,set_gradient_clip将不会生效,将以clip2的策略进行梯度裁剪
diff --git a/doc/fluid/api_cn/dataset_cn.rst b/doc/fluid/api_cn/dataset_cn.rst
index 076a7ea5e1fcba84eeb71cdeadfa0c3fc3092adf..6ad084eaa9764cd4b5b0c03abaa6e6c011688e89 100644
--- a/doc/fluid/api_cn/dataset_cn.rst
+++ b/doc/fluid/api_cn/dataset_cn.rst
@@ -1,5 +1,5 @@
=======================
-fluid.dataset
+paddle.dataset
=======================
diff --git a/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst b/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst
index 9641efa1cdb61d4b6eec7a986f8ebaa31ee553ec..901d32c2069c8905031d8f3d9b6abdc89730876a 100644
--- a/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst
+++ b/doc/fluid/api_cn/dataset_cn/DatasetFactory_cn.rst
@@ -1,34 +1,37 @@
-.. _cn_api_fluid_dataset_DatasetFactory:
-
-DatasetFactory
--------------------------------
-
-.. py:class:: paddle.fluid.dataset.DatasetFactory
-
-DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。
-
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
-
-.. py:method:: create_dataset(datafeed_class='QueueDataset')
-
-创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。
-
-
-参数:
- - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
-
-
-
+.. _cn_api_fluid_dataset_DatasetFactory:
+
+DatasetFactory
+-------------------------------
+
+.. py:class:: paddle.fluid.dataset.DatasetFactory
+
+
+
+
+DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创建“QueueDataset”,“InMemoryDataset”或“FileInstantDataset”,默认为“QueueDataset”。
+
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+
+.. py:method:: create_dataset(datafeed_class='QueueDataset')
+
+创建“QueueDataset”,“InMemoryDataset” 或 “FileInstantDataset”,默认为“QueueDataset”。
+
+
+参数:
+ - **datafeed_class** (str) – datafeed类名,为QueueDataset或InMemoryDataset。默认为QueueDataset。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+
+
+
diff --git a/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst b/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst
index fc2878f1fc81b907125f236cf92d905190fa194f..7699284c681fd87008d3b8ad41db89a93fd8d788 100644
--- a/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst
+++ b/doc/fluid/api_cn/dataset_cn/InMemoryDataset_cn.rst
@@ -1,354 +1,357 @@
-.. _cn_api_fluid_dataset_InMemoryDataset:
-
-InMemoryDataset
--------------------------------
-
-.. py:class:: paddle.fluid.dataset.InMemoryDataset
-
-InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类由DatasetFactory创建。
-
-**代码示例**:
-
-.. code-block:: python
-
- dataset = paddle.fluid.DatasetFactory().create_dataset(“InMemoryDataset”)
-
-.. py:method:: set_queue_num(queue_num)
-
-设置 ``Dataset`` 输出队列数量,训练进程会从队列中获取数据。
-
-参数:
- - **queue_num** (int) - dataset输出队列数量
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- dataset.set_queue_num(12)
-
-.. py:method:: set_fleet_send_batch_size(fleet_send_batch_size)
-
-设置发送batch的大小
-
-参数:
- - **fleet_send_batch_size** (int) - 设置发送batch的大小。
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- dataset.set_fleet_send_batch_size(800)
-
-.. py:method:: set_merge_by_lineid(var_list, erase_duplicate_feas=True, min_merge_size=2, keep_unmerged-ins=True)
-
-通过样本id来设置合并,一些线id的实例将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。
-
-参数:
- - **var_list** (list) - 可以被合并的特征列表,其中的每一个元素都是一个 ``Variable`` 。一些类特征我们通常不把它们合并为同样的样本id,所以用户应当指定哪个类特征可以被合并。
- - **erase_duplicate_feas** (bool) - 合并的时候是否删除重复的特征值。默认为True。
- - **min_merge_size** (int) - 合并的最小数量。默认为2。
- - **keep_unmerged_ins** (bool) - 是否保留没有合并的样本,比如有着独特id的样本,或者重复id的数量小于 ``min_merge_size`` 的样本。
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- dataset.set_merge_by_lineid()
-
-.. py:method:: load_into_memory()
-
-向内存中加载数据。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
-
-.. py:method:: preload_into_memory()
-
-向内存中以异步模式加载数据。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.preload_into_memory()
- dataset.wait_preload_done()
-
-.. py:method:: wait_preload_done()
-
-等待 ``preload_into_memory`` 完成。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.preload_into_memory()
- dataset.wait_preload_done()
-
-.. py:method:: local_shuffle()
-
-局域shuffle。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
- dataset.local_shuffle()
-
-
-.. py:method:: global_shuffle(fleet=None)
-
-全局shuffle。
-
-只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递fleet而非None。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
- dataset.global_shuffle(fleet)
-
-参数:
- - **fleet** (Fleet) – fleet单例。默认为None。
-
-
-.. py:method:: release_memory()
-
-当数据不再使用时,释放InMemoryDataset内存数据。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
- dataset.global_shuffle(fleet)
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
- exe.train_from_dataset(fluid.default_main_program(), dataset)
- dataset.release_memory()
-
-.. py:method:: get_memory_data_size(fleet=None)
-
-用户可以调用此函数以了解加载进内存后所有workers中的样本数量。
-
-.. note::
- 该函数可能会导致性能不佳,因为它具有barrier。
-
-参数:
- - **fleet** (Fleet) – fleet对象。
-
-返回:内存数据的大小。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
- print dataset.get_memory_data_size(fleet)
-
-
-.. py:method:: get_shuffle_data_size(fleet=None)
-
-获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。
-
-.. note::
- 该函数可能会导致局域shuffle性能不佳,因为它具有barrier。但其不影响局域shuffle。
-
-参数:
- - **fleet** (Fleet) – fleet对象。
-
-返回:shuffle数据的大小。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- filelist = ["a.txt", "b.txt"]
- dataset.set_filelist(filelist)
- dataset.load_into_memory()
- dataset.global_shuffle(fleet)
- print dataset.get_shuffle_data_size(fleet)
-
-
-.. py:method:: set_batch_size(batch_size)
-
-设置batch size。在训练期间生效。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_batch_size(128)
-
-参数:
- - **batch_size** (int) - batch size
-
-.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True)
-
-设置特征打乱特征验证模式,来修正特征level的重要性, 特征打乱需要 ``fea_eval`` 被设置为True。
-
-参数:
- - **record_candidate_size** (int) - 打乱一个特征的候选实例大小
- - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”)
- dataset.set_fea_eval(1000000, True)
-
-.. py:method:: desc()
-
-为 ``DataFeedDesc`` 返回一个缓存信息。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- print(dataset.desc())
-
-返回:一个字符串信息
-
-.. py:method:: set_filelist(filelist)
-
-在当前的worker中设置文件列表。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_filelist(["a.txt", "b.txt"])
-
-参数:
- - **filelist** (list) - 文件列表
-
-.. py:method:: set_hdfs_config(fs_name, fs_ugi)
-
-设置hdfs配置:fs名称与ugi。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_hdfs_config("my_fs_name", "my_fs_ugi")
-
-参数:
- - **fs_name** (str) - fs名称
- - **fs_ugi** (str) - fs ugi
-
-.. py:method:: set_pipe_command(pipe_coommand)
-
-在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_pipe_command("python my_script.py")
-
-参数:
- - **pipe_command** (str) - pipe命令
-
-.. py:method:: set_thread(thread_num)
-
-设置进程数量,等于readers的数量。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_thread(12)
-
-参数:
- - **thread_num** (int) - 进程数量
-
-.. py:method:: set_use_var(var_list)
-
-设置将要使用的 ``Variable`` 。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_use_var([data, label])
-
-参数:
- - **var_list** (list) - variable 列表
-
-.. py:method:: slots_shuffle(slots)
-
-该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。
-
-参数:
- - **slots** (list[string]) - 要打乱特征的集合
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”)
- dataset.set_merge_by_lineid()
- #支持slot 0
- dataset.slots_shuffle([‘0’])
-
-
-
+.. _cn_api_fluid_dataset_InMemoryDataset:
+
+InMemoryDataset
+-------------------------------
+
+.. py:class:: paddle.fluid.dataset.InMemoryDataset
+
+
+
+
+InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类由DatasetFactory创建。
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+
+.. py:method:: set_queue_num(queue_num)
+
+设置 ``Dataset`` 输出队列数量,训练进程会从队列中获取数据。
+
+参数:
+ - **queue_num** (int) - dataset输出队列数量
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ dataset.set_queue_num(12)
+
+.. py:method:: set_fleet_send_batch_size(fleet_send_batch_size)
+
+设置发送batch的大小
+
+参数:
+ - **fleet_send_batch_size** (int) - 设置发送batch的大小。
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ dataset.set_fleet_send_batch_size(800)
+
+.. py:method:: set_merge_by_lineid(var_list, erase_duplicate_feas=True, min_merge_size=2, keep_unmerged_ins=True)
+
+通过样本id(line id)来设置合并:具有相同line id的实例将会在shuffle之后被合并,你应当在data生成器里解析出每条样本的line id。
+
+参数:
+ - **var_list** (list) - 可以被合并的特征列表,其中的每一个元素都是一个 ``Variable`` 。一些类特征我们通常不把它们合并为同样的样本id,所以用户应当指定哪个类特征可以被合并。
+ - **erase_duplicate_feas** (bool) - 合并的时候是否删除重复的特征值。默认为True。
+ - **min_merge_size** (int) - 合并的最小数量。默认为2。
+ - **keep_unmerged_ins** (bool) - 是否保留没有合并的样本,比如有着独特id的样本,或者重复id的数量小于 ``min_merge_size`` 的样本。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ dataset.set_merge_by_lineid()
+
+.. py:method:: load_into_memory()
+
+向内存中加载数据。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+
+.. py:method:: preload_into_memory()
+
+向内存中以异步模式加载数据。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.preload_into_memory()
+ dataset.wait_preload_done()
+
+.. py:method:: wait_preload_done()
+
+等待 ``preload_into_memory`` 完成。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.preload_into_memory()
+ dataset.wait_preload_done()
+
+.. py:method:: local_shuffle()
+
+局域shuffle。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+ dataset.local_shuffle()
+
+
+.. py:method:: global_shuffle(fleet=None)
+
+全局shuffle。
+
+只能用在分布式模式(单机多进程或多机多进程)中。如果您在分布式模式中运行,应当传递fleet而非None。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+ dataset.global_shuffle(fleet)
+
+参数:
+ - **fleet** (Fleet) – fleet单例。默认为None。
+
+
+.. py:method:: release_memory()
+
+当数据不再使用时,释放InMemoryDataset内存数据。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+ dataset.global_shuffle(fleet)
+ exe = fluid.Executor(fluid.CPUPlace())
+ exe.run(fluid.default_startup_program())
+ exe.train_from_dataset(fluid.default_main_program(), dataset)
+ dataset.release_memory()
+
+.. py:method:: get_memory_data_size(fleet=None)
+
+用户可以调用此函数以了解加载进内存后所有workers中的样本数量。
+
+.. note::
+    该函数可能会导致性能不佳,因为其中包含barrier(同步)操作。
+
+参数:
+ - **fleet** (Fleet) – fleet对象。
+
+返回:内存数据的大小。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+    print(dataset.get_memory_data_size(fleet))
+
+
+.. py:method:: get_shuffle_data_size(fleet=None)
+
+获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。
+
+.. note::
+    该函数可能会导致局域shuffle性能不佳,因为其中包含barrier(同步)操作;但它不影响全局shuffle。
+
+参数:
+ - **fleet** (Fleet) – fleet对象。
+
+返回:shuffle数据的大小。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
+ dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ filelist = ["a.txt", "b.txt"]
+ dataset.set_filelist(filelist)
+ dataset.load_into_memory()
+ dataset.global_shuffle(fleet)
+    print(dataset.get_shuffle_data_size(fleet))
+
+
+.. py:method:: set_batch_size(batch_size)
+
+设置batch size。在训练期间生效。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_batch_size(128)
+
+参数:
+ - **batch_size** (int) - batch size
+
+.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True)
+
+设置特征打乱(slots shuffle)的特征验证模式,用于评估特征level的重要性;使用特征打乱时需要将 ``fea_eval`` 设置为True。
+
+参数:
+ - **record_candidate_size** (int) - 打乱一个特征的候选实例大小
+ - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ dataset.set_fea_eval(1000000, True)
+
+.. py:method:: desc()
+
+为 ``DataFeedDesc`` 返回一个缓存信息。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ print(dataset.desc())
+
+返回:一个字符串信息
+
+.. py:method:: set_filelist(filelist)
+
+在当前的worker中设置文件列表。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_filelist(["a.txt", "b.txt"])
+
+参数:
+ - **filelist** (list) - 文件列表
+
+.. py:method:: set_hdfs_config(fs_name, fs_ugi)
+
+设置hdfs配置:fs名称与ugi。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_hdfs_config("my_fs_name", "my_fs_ugi")
+
+参数:
+ - **fs_name** (str) - fs名称
+ - **fs_ugi** (str) - fs ugi
+
+.. py:method:: set_pipe_command(pipe_command)
+
+在当前的 ``dataset`` 中设置pipe命令。pipe命令只能是UNIX的pipe命令。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_pipe_command("python my_script.py")
+
+参数:
+ - **pipe_command** (str) - pipe命令
+
+.. py:method:: set_thread(thread_num)
+
+设置线程数量,等于readers的数量。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_thread(12)
+
+参数:
+    - **thread_num** (int) - 线程数量
+
+.. py:method:: set_use_var(var_list)
+
+设置将要使用的 ``Variable`` 。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_use_var([data, label])
+
+参数:
+ - **var_list** (list) - variable 列表
+
+.. py:method:: slots_shuffle(slots)
+
+该方法是特征(slot)层次上的一种打乱方法,常用于实例规模较大的稀疏特征场景:通过对一个或多个有baseline的特征做打乱,并比较打乱前后的metric(例如AUC),来验证特征level的重要性。
+
+参数:
+ - **slots** (list[string]) - 要打乱特征的集合
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+    dataset.set_merge_by_lineid()
+    #支持slot 0
+    dataset.slots_shuffle(['0'])
+
+
+
diff --git a/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst b/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst
index 2046dcb2b9d47fc46629874949035b5e4fceb487..298bf2bb4ecc8b356327af82570e7bb07e2e2907 100644
--- a/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst
+++ b/doc/fluid/api_cn/dataset_cn/QueueDataset_cn.rst
@@ -1,185 +1,188 @@
-.. _cn_api_fluid_dataset_QueueDataset:
-
-QueueDataset
--------------------------------
-
-.. py:class:: paddle.fluid.dataset.QueueDataset
-
-流式处理数据。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
-
-
-
-.. py:method:: local_shuffle()
-
-局域shuffle数据
-
-QueueDataset中不支持局域shuffle,可能抛出NotImplementedError
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
- dataset.local_shuffle()
-
-
-
-.. py:method:: global_shuffle(fleet=None)
-
-全局shuffle数据
-
-QueueDataset中不支持全局shuffle,可能抛出NotImplementedError
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
- dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
- dataset.global_shuffle(fleet)
-
-.. py:method:: desc()
-
-为 ``DataFeedDesc`` 返回一个缓存信息。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- print(dataset.desc())
-
-返回:一个字符串信息
-
-.. py:method:: set_batch_size(batch_size)
-
-设置batch size。在训练期间生效。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_batch_size(128)
-
-参数:
- - **batch_size** (int) - batch size
-
-.. py:method:: set_fea_eval(record_candidate_size,fea_eval)
-
-参数:
- - **record_candidate_size** (int) - 打乱一个特征的候选实例大小
- - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”)
- dataset.set_fea_eval(1000000, True)
-
-.. py:method:: set_filelist(filelist)
-
-在当前的worker中设置文件列表。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_filelist(["a.txt", "b.txt"])
-
-参数:
- - **filelist** (list) - 文件列表
-
-.. py:method:: set_hdfs_config(fs_name, fs_ugi)
-
-设置hdfs配置:fs名称与ugi。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_hdfs_config("my_fs_name", "my_fs_ugi")
-
-参数:
- - **fs_name** (str) - fs名称
- - **fs_ugi** (str) - fs ugi
-
-.. py:method:: set_pipe_command(pipe_coommand)
-
-在当前的 ``dataset`` 中设置pipe命令。pipe命令只能使用UNIX的pipe命令
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_pipe_command("python my_script.py")
-
-参数:
- - **pipe_command** (str) - pipe命令
-
-.. py:method:: set_thread(thread_num)
-
-设置进程数量,等于readers的数量。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_thread(12)
-
-参数:
- - **thread_num** (int) - 进程数量
-
-.. py:method:: set_use_var(var_list)
-
-设置将要使用的 ``Variable`` 。
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset()
- dataset.set_use_var([data, label])
-
-参数:
- - **var_list** (list) - variable 列表
-
-.. py:method:: slots_shuffle(slots)
-
-该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。
-
-参数:
- - **slots** (list[string]) - 要打乱特征的集合
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- dataset = fluid.DatasetFactory().create_dataset(“InMemoryDataset”)
- dataset.set_merge_by_lineid()
- #支持slot 0
- dataset.slots_shuffle([‘0’])
-
+.. _cn_api_fluid_dataset_QueueDataset:
+
+QueueDataset
+-------------------------------
+
+.. py:class:: paddle.fluid.dataset.QueueDataset
+
+
+
+
+流式处理数据。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
+
+
+
+.. py:method:: local_shuffle()
+
+局域shuffle数据
+
+QueueDataset中不支持局域shuffle,调用将抛出NotImplementedError。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
+ dataset.local_shuffle()
+
+
+
+.. py:method:: global_shuffle(fleet=None)
+
+全局shuffle数据
+
+QueueDataset中不支持全局shuffle,调用将抛出NotImplementedError。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
+ dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
+ dataset.global_shuffle(fleet)
+
+.. py:method:: desc()
+
+为 ``DataFeedDesc`` 返回一个缓存信息。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ print(dataset.desc())
+
+返回:一个字符串信息
+
+.. py:method:: set_batch_size(batch_size)
+
+设置batch size。在训练期间生效。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_batch_size(128)
+
+参数:
+ - **batch_size** (int) - batch size
+
+.. py:method:: set_fea_eval(record_candidate_size, fea_eval=True)
+
+参数:
+ - **record_candidate_size** (int) - 打乱一个特征的候选实例大小
+ - **fea_eval** (bool) - 是否设置特征验证模式来打乱特征,默认为True。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+ dataset.set_fea_eval(1000000, True)
+
+.. py:method:: set_filelist(filelist)
+
+在当前的worker中设置文件列表。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_filelist(["a.txt", "b.txt"])
+
+参数:
+ - **filelist** (list) - 文件列表
+
+.. py:method:: set_hdfs_config(fs_name, fs_ugi)
+
+设置hdfs配置:fs名称与ugi。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_hdfs_config("my_fs_name", "my_fs_ugi")
+
+参数:
+ - **fs_name** (str) - fs名称
+ - **fs_ugi** (str) - fs ugi
+
+.. py:method:: set_pipe_command(pipe_command)
+
+在当前的 ``dataset`` 中设置pipe命令。pipe命令只能是UNIX的pipe命令。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_pipe_command("python my_script.py")
+
+参数:
+ - **pipe_command** (str) - pipe命令
+
+.. py:method:: set_thread(thread_num)
+
+设置线程数量,等于readers的数量。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_thread(12)
+
+参数:
+    - **thread_num** (int) - 线程数量
+
+.. py:method:: set_use_var(var_list)
+
+设置将要使用的 ``Variable`` 。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ dataset = fluid.DatasetFactory().create_dataset()
+ dataset.set_use_var([data, label])
+
+参数:
+ - **var_list** (list) - variable 列表
+
+.. py:method:: slots_shuffle(slots)
+
+该方法是特征(slot)层次上的一种打乱方法,常用于实例规模较大的稀疏特征场景:通过对一个或多个有baseline的特征做打乱,并比较打乱前后的metric(例如AUC),来验证特征level的重要性。
+
+参数:
+ - **slots** (list[string]) - 要打乱特征的集合
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+    dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
+    dataset.set_merge_by_lineid()
+    #支持slot 0
+    dataset.slots_shuffle(['0'])
+
diff --git a/doc/fluid/api_cn/declarative_cn.rst b/doc/fluid/api_cn/declarative_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..91a1c40516b91d21fa735f5b18b17fafd5d5dba6
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn.rst
@@ -0,0 +1,28 @@
+=======================
+paddle.declarative
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ declarative_cn/batch_norm_cn.rst
+ declarative_cn/bilinear_tensor_product_cn.rst
+ declarative_cn/conv2d_cn.rst
+ declarative_cn/conv2d_transpose_cn.rst
+ declarative_cn/conv3d_cn.rst
+ declarative_cn/conv3d_transpose_cn.rst
+ declarative_cn/create_parameter_cn.rst
+ declarative_cn/crf_decoding_cn.rst
+ declarative_cn/data_norm_cn.rst
+ declarative_cn/deformable_conv_cn.rst
+ declarative_cn/embedding_cn.rst
+ declarative_cn/fc_cn.rst
+ declarative_cn/group_norm_cn.rst
+ declarative_cn/hsigmoid_cn.rst
+ declarative_cn/instance_norm_cn.rst
+ declarative_cn/layer_norm_cn.rst
+ declarative_cn/multi_box_head_cn.rst
+ declarative_cn/nce_cn.rst
+ declarative_cn/prelu_cn.rst
+ declarative_cn/row_conv_cn.rst
+ declarative_cn/spectral_norm_cn.rst
diff --git a/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aa4fde30267813a7f4a750ca0b8ba77cbc319e03
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/batch_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_batch_norm:
+
+batch_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.batch_norm
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst b/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..18c59e8a49bbaab3d7a2878b51e0eee067c06348
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/bilinear_tensor_product_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_bilinear_tensor_product:
+
+bilinear_tensor_product
+-------------------------------
+:doc_source: paddle.fluid.layers.bilinear_tensor_product
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst b/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b64c16df0ff49223b470a1ebb5819f8590ba9f79
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/conv2d_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_conv2d:
+
+conv2d
+-------------------------------
+:doc_source: paddle.fluid.layers.conv2d
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst b/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f02331663cc0662ba2db3419752111a0de8dc07
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/conv2d_transpose_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_conv2d_transpose:
+
+conv2d_transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.conv2d_transpose
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst b/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..271dd69c77d37598e0828b0c32f231dee4e6568e
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/conv3d_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_conv3d:
+
+conv3d
+-------------------------------
+:doc_source: paddle.fluid.layers.conv3d
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst b/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bcde71e60369226cd1ca8a4f230e11034c16a18f
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/conv3d_transpose_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_conv3d_transpose:
+
+conv3d_transpose
+-------------------------------
+:doc_source: paddle.fluid.layers.conv3d_transpose
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst b/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..489beae017352305b3a20ec705e9310e8ae15250
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/create_parameter_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_create_parameter:
+
+create_parameter
+-------------------------------
+:doc_source: paddle.fluid.layers.create_parameter
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst b/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e8194b5b0bd48d4fb52edda70023ce70cce8096c
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/crf_decoding_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_crf_decoding:
+
+crf_decoding
+-------------------------------
+:doc_source: paddle.fluid.layers.crf_decoding
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a52b0ab1d612c86a06972b9a40787c54469d8829
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/data_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_data_norm:
+
+data_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.data_norm
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst b/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4008e90948f7b34aa86b26d79d2fe54ed885d929
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/deformable_conv_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_deformable_conv:
+
+deformable_conv
+-------------------------------
+:doc_source: paddle.fluid.layers.deformable_conv
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/embedding_cn.rst b/doc/fluid/api_cn/declarative_cn/embedding_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..296102ffed1a3c21afe617836cf38171c4dc9cc7
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/embedding_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_embedding:
+
+embedding
+-------------------------------
+:doc_source: paddle.fluid.input.embedding
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/fc_cn.rst b/doc/fluid/api_cn/declarative_cn/fc_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a0aa6224d58fb79ace46565d6c3f1ccc9e1f7fdc
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/fc_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_fc:
+
+fc
+-------------------------------
+:doc_source: paddle.fluid.layers.fc
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7e49fc62ce63bdd4fb738f26299a6fe09d87d798
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/group_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_group_norm:
+
+group_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.group_norm
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst b/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9fe20b0cdc08acacedec20998ad6640957539200
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/hsigmoid_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_hsigmoid:
+
+hsigmoid
+-------------------------------
+:doc_source: paddle.fluid.layers.hsigmoid
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f21555863014924fabf53a1bcb2d8211140edd1f
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/instance_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_instance_norm:
+
+instance_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.instance_norm
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..71f6ec2cc327ff06a1d5dcd44db2068531c0f1b2
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/layer_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_layer_norm:
+
+layer_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.layer_norm
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst b/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d942c82de8b45b511fdc00869d60c4c886b0a36
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/multi_box_head_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_multi_box_head:
+
+multi_box_head
+-------------------------------
+:doc_source: paddle.fluid.layers.multi_box_head
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/nce_cn.rst b/doc/fluid/api_cn/declarative_cn/nce_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b6d4bbcbe292ef88334cad9094d767067265eccd
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/nce_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_nce:
+
+nce
+-------------------------------
+:doc_source: paddle.fluid.layers.nce
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/prelu_cn.rst b/doc/fluid/api_cn/declarative_cn/prelu_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..82fef8c6e06b84e06665e247cf88025178857da7
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/prelu_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_prelu:
+
+prelu
+-------------------------------
+:doc_source: paddle.fluid.layers.prelu
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst b/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fd07da80492eb4dab754a2689af24badccbd0725
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/row_conv_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_row_conv:
+
+row_conv
+-------------------------------
+:doc_source: paddle.fluid.layers.row_conv
+
+
diff --git a/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst b/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..17ffd21a9b95131283ffb99a50a86073474d4798
--- /dev/null
+++ b/doc/fluid/api_cn/declarative_cn/spectral_norm_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_declarative_cn_spectral_norm:
+
+spectral_norm
+-------------------------------
+:doc_source: paddle.fluid.layers.spectral_norm
+
+
diff --git a/doc/fluid/api_cn/distributed_cn.rst b/doc/fluid/api_cn/distributed_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ff75e9ce3331c65f8313ac160d8750e313527231
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn.rst
@@ -0,0 +1,22 @@
+=======================
+paddle.distributed
+=======================
+
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ distributed_cn/all_gather_cn.rst
+ distributed_cn/all_reduce_cn.rst
+ distributed_cn/barrier_cn.rst
+ distributed_cn/broadcast_cn.rst
+ distributed_cn/get_rank_cn.rst
+ distributed_cn/get_world_size_cn.rst
+ distributed_cn/init_parallel_env_cn.rst
+ distributed_cn/ParallelEnv_cn.rst
+ distributed_cn/prepare_context_cn.rst
+ distributed_cn/reduce_cn.rst
+ distributed_cn/scatter_cn.rst
+ distributed_cn/spawn_cn.rst
diff --git a/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c56a483466aa81edc4910df07c8078bdccd095e1
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/ParallelEnv_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_distributed_ParallelEnv:
+
+ParallelEnv
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv
\ No newline at end of file
diff --git a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6cbe19db63cad604c75657c9da83b18951475fdc
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
@@ -0,0 +1,44 @@
+.. _cn_api_distributed_all_gather:
+
+all_gather
+-------------------------------
+
+
+.. py:function:: paddle.distributed.all_gather(tensor_list, tensor, group=0)
+
+对进程组内所有进程的指定tensor进行聚合(gather)操作,并将聚合后的结果返回给所有进程。
+
+参数
+:::::::::
+ - tensor_list (list) - 操作的输出Tensor列表。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - tensor (Tensor) - 操作的输入Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ tensor_list = []
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ np_data1 = np.array([[4, 5, 6], [4, 5, 6]])
+ np_data2 = np.array([[4, 5, 6], [4, 5, 6]])
+ data1 = paddle.to_tensor(np_data1)
+ data2 = paddle.to_tensor(np_data2)
+ paddle.distributed.all_gather(tensor_list, data1)
+ else:
+ np_data1 = np.array([[1, 2, 3], [1, 2, 3]])
+ np_data2 = np.array([[1, 2, 3], [1, 2, 3]])
+ data1 = paddle.to_tensor(np_data1)
+ data2 = paddle.to_tensor(np_data2)
+ paddle.distributed.all_gather(tensor_list, data2)
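+    # 以两卡运行为例,各进程上的 tensor_list 按rank顺序聚合,最终均为
+    # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]](此处为根据聚合语义推断的示例输出)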
diff --git a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc183c32190520eafa94e715dfc1a4822f80e54a
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
@@ -0,0 +1,40 @@
+.. _cn_api_distributed_all_reduce:
+
+all_reduce
+-------------------------------
+
+
+.. py:function:: paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0)
+
+对进程组内所有进程的指定tensor进行归约(reduce)操作,并将归约后的结果返回给所有进程。
+
+参数
+:::::::::
+ - tensor (Tensor) - 操作的输入Tensor,同时也会将归约结果返回至此Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。
+    - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ from paddle.distributed import ReduceOp
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ np_data = np.array([[4, 5, 6], [4, 5, 6]])
+ else:
+ np_data = np.array([[1, 2, 3], [1, 2, 3]])
+ data = paddle.to_tensor(np_data)
+ paddle.distributed.all_reduce(data)
+ out = data.numpy()
+ # [[5, 7, 9], [5, 7, 9]]
diff --git a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb63526cfd0163cec91396144b2e0c3c0f9beace
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
@@ -0,0 +1,29 @@
+.. _cn_api_distributed_barrier:
+
+barrier
+-------------------------------
+
+
+.. py:function:: paddle.distributed.barrier(group=0)
+
+同步进程组内的所有进程。
+
+参数
+:::::::::
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import paddle
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ paddle.distributed.barrier()
diff --git a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..33e653b56648ff6f97b64b19fffcf64bca4a989c
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
@@ -0,0 +1,39 @@
+.. _cn_api_distributed_broadcast:
+
+broadcast
+-------------------------------
+
+
+.. py:function:: paddle.distributed.broadcast(tensor, src, group=0)
+
+将一个Tensor广播给进程组内的其他所有进程。
+
+参数
+:::::::::
+ - tensor (Tensor) - 如果当前进程编号是源,那么这个Tensor变量将被发送给其他进程,否则这个Tensor将接收源发送过来的数据。Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - src (int) - 发送源的进程编号。
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ np_data = np.array([[4, 5, 6], [4, 5, 6]])
+ else:
+ np_data = np.array([[1, 2, 3], [1, 2, 3]])
+ data = paddle.to_tensor(np_data)
+ paddle.distributed.broadcast(data, 1)
+ out = data.numpy()
+ # [[1, 2, 3], [1, 2, 3]]
diff --git a/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst b/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75ad8fc79baa6a560fe956799c1a00bc9d67376d
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/get_rank_cn.rst
@@ -0,0 +1,25 @@
+.. _cn_api_distributed_get_rank:
+
+get_rank
+----------
+
+.. py:function:: paddle.distributed.get_rank()
+
+返回当前进程的rank。
+
+当前进程rank的值等于环境变量 ``PADDLE_TRAINER_ID`` 的值,默认值为0。
+
+返回
+:::::::::
+(int) 当前进程的rank。
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import paddle
+ import paddle.distributed as dist
+
+ # execute this command in terminal: export PADDLE_TRAINER_ID=0
+ print("The rank is %d" % dist.get_rank())
+ # The rank is 0
diff --git a/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst b/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08342de3c1f44b96762eefb1d9ae96918112d9dd
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/get_world_size_cn.rst
@@ -0,0 +1,25 @@
+.. _cn_api_distributed_get_world_size:
+
+get_world_size
+----------------
+
+.. py:function:: paddle.distributed.get_world_size()
+
+返回参与当前任务的进程数。
+
+当前进程数等于环境变量 ``PADDLE_TRAINERS_NUM`` 的值,默认值为1。
+
+返回
+:::::::::
+(int) 参与任务的进程数。
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import paddle
+ import paddle.distributed as dist
+
+ # execute this command in terminal: export PADDLE_TRAINERS_NUM=4
+ print("The world_size is %d" % dist.get_world_size())
+ # The world_size is 4
diff --git a/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst b/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eafe9f10f0548931828798b28896ef59f211eefe
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/init_parallel_env_cn.rst
@@ -0,0 +1,64 @@
+.. _cn_api_distributed_init_parallel_env:
+
+init_parallel_env
+-----------------
+
+.. py:function:: paddle.distributed.init_parallel_env()
+
+初始化动态图模式下的并行训练环境。
+
+.. note::
+ 目前仅支持初始化GPU训练环境,使用NCCL进行通信。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import paddle
+ import paddle.nn as nn
+ import paddle.optimizer as opt
+ import paddle.distributed as dist
+
+ class LinearNet(nn.Layer):
+ def __init__(self):
+ super(LinearNet, self).__init__()
+ self._linear1 = nn.Linear(10, 10)
+ self._linear2 = nn.Linear(10, 1)
+
+ def forward(self, x):
+ return self._linear2(self._linear1(x))
+
+ def train():
+ # 1. enable dynamic mode
+ paddle.disable_static()
+
+ # 2. initialize parallel environment
+ dist.init_parallel_env()
+
+ # 3. create data parallel layer & optimizer
+ layer = LinearNet()
+ dp_layer = paddle.DataParallel(layer)
+
+ loss_fn = nn.MSELoss()
+ adam = opt.Adam(
+ learning_rate=0.001, parameters=dp_layer.parameters())
+
+ # 4. run layer
+ inputs = paddle.randn([10, 10], 'float32')
+ outputs = dp_layer(inputs)
+ labels = paddle.randn([10, 1], 'float32')
+ loss = loss_fn(outputs, labels)
+
+ loss = dp_layer.scale_loss(loss)
+ loss.backward()
+ dp_layer.apply_collective_grads()
+
+ adam.step()
+ adam.clear_grad()
+
+ if __name__ == '__main__':
+ dist.spawn(train)
diff --git a/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst b/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..656ff3c3498c9adcb080dcd45da46967e7117e01
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/prepare_context_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_distributed_prepare_context:
+
+prepare_context
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.prepare_context
diff --git a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6f471a67a8a0a17617f18208ca9b596ce8182f5e
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
@@ -0,0 +1,40 @@
+.. _cn_api_distributed_reduce:
+
+reduce
+-------------------------------
+
+
+.. py:function:: paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=0)
+
+对进程组内所有进程的指定tensor进行归约操作,并将归约后的结果返回给目标进程 ``dst`` 。
+
+参数
+:::::::::
+ - tensor (Tensor) - 操作的输入Tensor,结果返回至目标进程号的Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - dst (int) - 返回操作结果的目标进程编号。
+ - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD,可选) - 归约的具体操作,比如求和、取最大值、取最小值和求乘积,默认为求和归约。
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ np_data = np.array([[4, 5, 6], [4, 5, 6]])
+ else:
+ np_data = np.array([[1, 2, 3], [1, 2, 3]])
+ data = paddle.to_tensor(np_data)
+ paddle.distributed.reduce(data, 0)
+ out = data.numpy()
+ # [[5, 7, 9], [5, 7, 9]]
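+
+上例使用默认的求和归约。下面的片段为一个简单示意(假设仍在上述两卡环境中运行,且各进程的 ``data`` 为初始值),展示如何通过 ``op`` 参数改用最大值归约:
+
+.. code-block:: python
+
+    paddle.distributed.reduce(data, 0, op=paddle.distributed.ReduceOp.MAX)
+    out = data.numpy()
+    # 目标进程(rank 0)上的结果为 [[4, 5, 6], [4, 5, 6]]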
diff --git a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a238202658419ab62594f3817e2923468e56f4fb
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
@@ -0,0 +1,45 @@
+.. _cn_api_distributed_scatter:
+
+scatter
+-------------------------------
+
+
+.. py:function:: paddle.distributed.scatter(tensor, tensor_list=None, src=0, group=0)
+
+将进程组内指定源进程的tensor列表分发到组内所有进程中。
+
+参数
+:::::::::
+ - tensor (Tensor) - 操作的输出Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - tensor_list (list,可选) - 操作的输入Tensor列表,默认为None。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。
+ - src (int,可选) - 操作的源进程号,该进程号的Tensor列表将分发到其他进程中。默认为0。
+ - group (int,可选) - 工作的进程组编号,默认为0。
+
+返回
+:::::::::
+无
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ from paddle.distributed import init_parallel_env
+
+ paddle.disable_static()
+ paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+ init_parallel_env()
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ np_data1 = np.array([7, 8, 9])
+ np_data2 = np.array([10, 11, 12])
+ else:
+ np_data1 = np.array([1, 2, 3])
+ np_data2 = np.array([4, 5, 6])
+ data1 = paddle.to_tensor(np_data1)
+ data2 = paddle.to_tensor(np_data2)
+ if paddle.distributed.ParallelEnv().local_rank == 0:
+ paddle.distributed.scatter(data1, src=1)
+ else:
+ paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1)
+ out = data1.numpy()
diff --git a/doc/fluid/api_cn/distributed_cn/spawn_cn.rst b/doc/fluid/api_cn/distributed_cn/spawn_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..21f8f762f5052474aac91c31ada8f76b664594b2
--- /dev/null
+++ b/doc/fluid/api_cn/distributed_cn/spawn_cn.rst
@@ -0,0 +1,105 @@
+.. _cn_api_distributed_spawn:
+
+spawn
+-----
+
+.. py:function:: paddle.distributed.spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options)
+
+使用 ``spawn`` 方法启动多进程任务。
+
+参数
+:::::::::
+ - func (function) - 由 ``spawn`` 方法启动的进程所调用的目标函数。该目标函数需要能够被 ``pickled`` (序列化),所以目标函数必须定义为模块的一级函数,不能是内部子函数或者类方法。
+ - args (tuple, 可选) - 传入目标函数 ``func`` 的参数。
+ - nprocs (int, 可选) - 启动进程的数目。默认值为-1。当 ``nprocs`` 为-1时,将从环境变量中获取当前可用的所有设备并全部使用:如果使用GPU执行任务,将会从环境变量 ``CUDA_VISIBLE_DEVICES`` 中获取当前所有可用的设备ID;如果使用CPU执行任务,将会从环境变量 ``CPU_NUM`` 中获取当前可用的CPU设备数。例如,可以通过指令 ``export CPU_NUM=4`` 配置默认可用CPU设备数;如果此环境变量没有设置,将会默认将其值设置为1。
+ - join (bool, 可选) - 对所有启动的进程执行阻塞的 ``join`` ,等待进程执行结束。默认为True。
+ - daemon (bool, 可选) - 配置启动进程的 ``daemon`` 属性。默认为False。
+ - **options** (dict, 可选) - 其他初始化并行执行环境的配置选项。目前支持以下选项:
+
+   (1) start_method (string) - 启动子进程的方法。可以是 ``spawn`` 、 ``fork`` 、 ``forkserver`` 。因为CUDA运行时环境不支持 ``fork`` 方法,当在子进程中使用CUDA时,需要使用 ``spawn`` 或者 ``forkserver`` 方法启动进程。默认方法为 ``spawn`` 。
+
+   (2) cluster_node_ips (string) - 运行集群的节点(机器)IP,例如 "192.168.0.16,192.168.0.17" ,默认值为 "127.0.0.1" 。
+
+   (3) node_ip (string) - 当前节点(机器)的IP,例如 "192.168.0.16" ,默认值为 "127.0.0.1" 。
+
+   (4) started_port (int) - 一个训练节点(机器)上各训练进程的起始端口,例如 6170 ,默认值为None。
+
+   (5) selected_gpus (string) - 指定训练使用的GPU ID,例如 "0,1,2,3" ,默认值为None。
+
+   (6) print_config (bool) - 是否打印当前并行训练的配置,默认值为False。
+
+   (7) use_paddlecloud (bool) - 是否使用PaddleCloud启动多进程任务,默认值为False。
+
+返回
+:::::::::
+ ``MultiprocessContext`` 对象,持有创建的多个进程。
+
+代码示例
+:::::::::
+.. code-block:: python
+
+ from __future__ import print_function
+
+ import paddle
+ import paddle.nn as nn
+ import paddle.optimizer as opt
+ import paddle.distributed as dist
+
+ class LinearNet(nn.Layer):
+ def __init__(self):
+ super(LinearNet, self).__init__()
+ self._linear1 = nn.Linear(10, 10)
+ self._linear2 = nn.Linear(10, 1)
+
+ def forward(self, x):
+ return self._linear2(self._linear1(x))
+
+ def train(print_result=False):
+ # 1. enable dynamic mode
+ paddle.disable_static()
+
+ # 2. initialize parallel environment
+ dist.init_parallel_env()
+
+ # 3. create data parallel layer & optimizer
+ layer = LinearNet()
+ dp_layer = paddle.DataParallel(layer)
+
+ loss_fn = nn.MSELoss()
+ adam = opt.Adam(
+ learning_rate=0.001, parameters=dp_layer.parameters())
+
+ # 4. run layer
+ inputs = paddle.randn([10, 10], 'float32')
+ outputs = dp_layer(inputs)
+ labels = paddle.randn([10, 1], 'float32')
+ loss = loss_fn(outputs, labels)
+
+ if print_result is True:
+ print("loss:", loss.numpy())
+
+ loss = dp_layer.scale_loss(loss)
+ loss.backward()
+ dp_layer.apply_collective_grads()
+
+ adam.step()
+ adam.clear_grad()
+
+ # Usage 1: only pass function.
+ # If your training method no need any argument, and
+ # use all visible devices for parallel training.
+ if __name__ == '__main__':
+ dist.spawn(train)
+
+ # Usage 2: pass function and arguments.
+ # If your training method need some arguments, and
+ # use all visible devices for parallel training.
+ if __name__ == '__main__':
+ dist.spawn(train, args=(True,))
+
+ # Usage 3: pass function, arguments and nprocs.
+ # If your training method need some arguments, and
+ # only use part of visible devices for parallel training.
+ # If your machine hold 8 cards {0,1,2,3,4,5,6,7},
+ # this case will use cards {0,1}; If you set
+ # CUDA_VISIBLE_DEVICES=4,5,6,7, this case will use
+ # cards {4,5}
+ if __name__ == '__main__':
+ dist.spawn(train, args=(True,), nprocs=2)
+
+ # Usage 4: pass function, arguments, nprocs and selected_gpus.
+ # If your training method need some arguments, and
+ # only use part of visible devices for parallel training,
+ # but you can't set your machine's environment variable
+ # CUDA_VISIBLE_DEVICES, such as it is None or all cards
+ # {0,1,2,3,4,5,6,7}, you can pass `selected_gpus` to
+ # select the GPU cards you want to use. For example,
+ # this case will use cards {4,5} if your machine hold 8 cards.
+ if __name__ == '__main__':
+ dist.spawn(train, args=(True,), nprocs=2, selected_gpus='4,5')
\ No newline at end of file
diff --git a/doc/fluid/api_cn/dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn.rst
index 23b6e135713c3bbdad323a9cc1d8113d27653377..889d6c89603ccec58c067833703a70f22354f06e 100644
--- a/doc/fluid/api_cn/dygraph_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn.rst
@@ -8,40 +8,54 @@ fluid.dygraph
.. toctree::
:maxdepth: 1
- dygraph_cn/BackwardStrategy_cn.rst
dygraph_cn/BatchNorm_cn.rst
dygraph_cn/BilinearTensorProduct_cn.rst
dygraph_cn/Conv2D_cn.rst
dygraph_cn/Conv2DTranspose_cn.rst
dygraph_cn/Conv3D_cn.rst
dygraph_cn/Conv3DTranspose_cn.rst
+ dygraph_cn/CosineAnnealingDecay_cn.rst
dygraph_cn/CosineDecay_cn.rst
+ dygraph_cn/DataParallel_cn.rst
+ dygraph_cn/declarative_cn.rst
+ dygraph_cn/Dropout_cn.rst
dygraph_cn/Embedding_cn.rst
dygraph_cn/ExponentialDecay_cn.rst
dygraph_cn/FC_cn.rst
+ dygraph_cn/grad_cn.rst
dygraph_cn/GroupNorm_cn.rst
dygraph_cn/GRUUnit_cn.rst
dygraph_cn/guard_cn.rst
+ dygraph_cn/InstanceNorm_cn.rst
dygraph_cn/InverseTimeDecay_cn.rst
+ dygraph_cn/jit_cn.rst
+ dygraph_cn/LambdaDecay_cn.rst
dygraph_cn/Layer_cn.rst
dygraph_cn/LayerList_cn.rst
dygraph_cn/LayerNorm_cn.rst
dygraph_cn/Linear_cn.rst
dygraph_cn/load_dygraph_cn.rst
+ dygraph_cn/MultiStepDecay_cn.rst
dygraph_cn/NaturalExpDecay_cn.rst
dygraph_cn/NCE_cn.rst
dygraph_cn/NoamDecay_cn.rst
+ dygraph_cn/ParallelEnv_cn.rst
dygraph_cn/ParameterList_cn.rst
- dygraph_cn/no_grad_cn.rst
+ dygraph_cn/no_grad_cn.rst
dygraph_cn/PiecewiseDecay_cn.rst
dygraph_cn/PolynomialDecay_cn.rst
dygraph_cn/Pool2D_cn.rst
dygraph_cn/PRelu_cn.rst
dygraph_cn/prepare_context_cn.rst
+ dygraph_cn/ProgramTranslator_cn.rst
+ dygraph_cn/ReduceLROnPlateau_cn.rst
dygraph_cn/save_dygraph_cn.rst
dygraph_cn/Sequential_cn.rst
dygraph_cn/SpectralNorm_cn.rst
+ dygraph_cn/StepDecay_cn.rst
dygraph_cn/to_variable_cn.rst
dygraph_cn/TracedLayer_cn.rst
dygraph_cn/Tracer_cn.rst
+ dygraph_cn/TranslatedLayer_cn.rst
dygraph_cn/TreeConv_cn.rst
+ dygraph_cn/enabled_cn.rst
diff --git a/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst b/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst
deleted file mode 100644
index 5e8fc9945d206ac7936a48c7a8ad53517f2889b2..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/dygraph_cn/BackwardStrategy_cn.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-.. _cn_api_fluid_dygraph_BackwardStrategy:
-
-BackwardStrategy
--------------------------------
-
-**注意:该API仅支持【动态图】模式**
-
-.. py:class:: paddle.fluid.dygraph.BackwardStrategy
-
-**注意:该API只在动态图下生效**
-
-BackwardStrategy是描述动态图反向执行的策略,主要功能是定义动态图反向执行时的不同策略
-
-**属性:**
-
-.. py:attribute:: sort_sum_gradient
-
-是否按照前向执行的逆序加和多个梯度,例如当 x_var( :ref:`api_guide_Variable` )作为多个OP(这里以 :ref:`cn_api_fluid_layers_scale` 为例)的输入时,其产生的梯度是否按照前向书写时的
-逆序加和,默认为False
-
-
-**代码示例**
-
-.. code-block:: python
-
- import numpy as np
- import paddle.fluid as fluid
-
- x = np.ones([2, 2], np.float32)
- with fluid.dygraph.guard():
- x_var = fluid.dygraph.to_variable(x)
- sums_inputs = []
- # 这里x_var将作为多个输入scale的输入
- for _ in range(10):
- sums_inputs.append(fluid.layers.scale(x_var))
- ret2 = fluid.layers.sums(sums_inputs)
- loss2 = fluid.layers.reduce_sum(ret2)
- backward_strategy = fluid.dygraph.BackwardStrategy()
- backward_strategy.sort_sum_gradient = True
- loss2.backward(backward_strategy)
-
-
-
-
-
-
diff --git a/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst
index 7b2030eca47a2852c4d381e1d32542aef8aab3dc..bede52decd8cf46131fd2cdf6b0b91673fd34781 100644
--- a/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/BatchNorm_cn.rst
@@ -5,6 +5,12 @@ BatchNorm
.. py:class:: paddle.fluid.dygraph.BatchNorm(num_channels, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32', data_layout='NCHW', in_place=False, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, trainable_statistics=False)
+:alias_main: paddle.nn.BatchNorm
+:alias: paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm
+:old_api: paddle.fluid.dygraph.BatchNorm
+
+
+
该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_
当use_global_stats = False时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下:
diff --git a/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst b/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst
index 095f9ad0d318733fc638126a2340f59b9c9c550f..59b18c17a32951135e431ef036a6d32771651399 100644
--- a/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/BilinearTensorProduct_cn.rst
@@ -5,6 +5,12 @@ BilinearTensorProduct
.. py:class:: paddle.fluid.dygraph.BilinearTensorProduct(input1_dim, input2_dim, output_dim, name=None, act=None, param_attr=None, bias_attr=None, dtype="float32")
+:alias_main: paddle.nn.BilinearTensorProduct
+:alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
+:old_api: paddle.fluid.dygraph.BilinearTensorProduct
+
+
+
该接口用于构建 ``BilinearTensorProduct`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。双线性乘积计算式子如下。
.. math::
diff --git a/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst
index 60e0f101f01d150cc9696a4bdbd07246f643b785..b500bdbf6c7884f1bc263532eb0f3db379790fa8 100644
--- a/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Conv2DTranspose_cn.rst
@@ -5,6 +5,9 @@ Conv2DTranspose
.. py:class:: paddle.fluid.dygraph.Conv2DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32")
+
+
+
该接口用于构建 ``Conv2DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积转置层(Convlution2D Transpose Layer),其根据输入(input)、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输入特征图个数,C是输出特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。转置卷积的计算过程相当于卷积的反向计算,转置卷积又被称为反卷积(但其实并不是真正的反卷积)。详情请参考: `Conv2DTranspose `_ 。
输入 ``X`` 和输出 ``Out`` 的函数关系如下:
@@ -71,7 +74,7 @@ Conv2DTranspose
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
- 'Conv2DTranspose', num_filters=2, filter_size=3)
+ num_channels=32, num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
属性
diff --git a/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst
index 0dd3f2de92119fb6ccfb5d482ab7c6b43469e3bc..3e81c4a31738d78234527178a1408c7cc03519ef 100644
--- a/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Conv2D_cn.rst
@@ -5,6 +5,9 @@ Conv2D
.. py:class:: paddle.fluid.dygraph.Conv2D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype='float32')
+
+
+
该接口用于构建 ``Conv2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积层(Convolution2D Layer),其根据输入、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)参数来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输出特征图个数,C是输入特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。详情请参考: `卷积 `_ 。
对每个输入 ``X`` ,有等式:
@@ -43,7 +46,7 @@ Conv2D
参数:
- **num_channels** (int) - 输入图像的通道数。
- - **num_fliters** (int) - 滤波器的个数,和输出特征图个数相同。
+ - **num_filters** (int) - 滤波器的个数,和输出特征图个数相同。
- **filter_size** (int|tuple) - 滤波器大小。如果 ``filter_size`` 是一个元组,则必须包含两个整型数,分别表示滤波器高度和宽度。否则,表示滤波器高度和宽度均为 ``filter_size`` 。
- **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。
- **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。默认值:0。
diff --git a/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst
index cda322bc004a5d0b595a534d8d83fca7e3639f4f..a3e4134d1e6a31dd8e206bcf5546d511a79e9d7a 100644
--- a/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Conv3DTranspose_cn.rst
@@ -6,6 +6,9 @@ Conv3DTranspose
.. py:class:: paddle.fluid.dygraph.Conv3DTranspose(num_channels, num_filters, filter_size, output_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, dtype="float32")
+
+
+
该接口用于构建 ``Conv3DTranspose`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积转置层(Convlution3D transpose layer)根据输入(input)、滤波器(filter)和卷积核膨胀(dilations)、步长(stride)、填充来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW格式。其中 ``N`` 为batch大小, ``C`` 为通道数(channel), ``D`` 为特征深度, ``H`` 为特征高度, ``W`` 为特征宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。
diff --git a/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst
index 7199bf75b2513a4a3e1c3bac96f6c68ab79f35b0..9e0291edba17ab22bc5288e21b4e16ee5a0305f0 100644
--- a/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Conv3D_cn.rst
@@ -6,6 +6,9 @@ Conv3D
.. py:class:: paddle.fluid.dygraph.Conv3D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype="float32")
+
+
+
该接口用于构建 ``Conv3D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。3D卷积层(convolution3D layer)根据输入、滤波器(filter)、步长(stride)、填充(padding)、膨胀(dilations)、组数参数计算得到输出。输入和输出是[N, C, D, H, W]的多维tensor,其中N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。卷积三维(Convlution3D)和卷积二维(Convlution2D)相似,但多了一维深度(depth)。如果提供了bias属性和激活函数类型,bias会添加到卷积(convolution)的结果中相应的激活函数会作用在最终结果上。
对每个输入X,有等式:
diff --git a/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst
index 321f7d607e73bc9a65b506471d72dd0bf261f6f6..92ffd6338eddd0034b8d2cb3f06ddb7d9c537018 100644
--- a/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/CosineDecay_cn.rst
@@ -3,10 +3,13 @@
CosineDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.CosineDecay(learning_rate, step_each_epoch, epochs, begin=0, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供按余弦函数衰减学习率的功能。
余弦衰减的计算方式如下。
diff --git a/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst b/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0258a6136d02cbcc5f822fbc7f5d066136cf2d27
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/DataParallel_cn.rst
@@ -0,0 +1,208 @@
+.. _cn_api_fluid_dygraph_DataParallel:
+
+DataParallel
+------------
+
+.. py:class:: paddle.fluid.dygraph.DataParallel(layers, strategy)
+
+:api_attr: 命令式编程模式(动态图)
+
+通过数据并行模式执行动态图模型。
+
+目前,``DataParallel`` 仅支持以多进程的方式执行动态图模型。
+
+支持两种使用方式:
+
+1. 使用 ``paddle.distributed.spawn`` 方法启动,例如:
+
+ ``python demo.py`` (spawn needs to be called in the ``__main__`` method)
+
+2. 使用 ``paddle.distributed.launch`` 方法启动,例如:
+
+ ``python -m paddle.distributed.launch --selected_gpus=0,1 demo.py``
+
+其中 ``demo.py`` 脚本的代码可以是下面的示例代码。
+
+参数:
+ - **layers** (Layer) - 需要通过数据并行方式执行的模型。
+ - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为None。
+
+返回:支持数据并行的 ``Layer``
+
+返回类型:Layer实例
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ import paddle.nn as nn
+ import paddle.optimizer as opt
+ import paddle.distributed as dist
+
+ class LinearNet(nn.Layer):
+ def __init__(self):
+ super(LinearNet, self).__init__()
+ self._linear1 = nn.Linear(10, 10)
+ self._linear2 = nn.Linear(10, 1)
+
+ def forward(self, x):
+ return self._linear2(self._linear1(x))
+
+ def train():
+ # 1. enable dynamic mode
+ paddle.disable_static()
+
+ # 2. initialize parallel environment
+ dist.init_parallel_env()
+
+ # 3. create data parallel layer & optimizer
+ layer = LinearNet()
+ dp_layer = paddle.DataParallel(layer)
+
+ loss_fn = nn.MSELoss()
+ adam = opt.Adam(
+ learning_rate=0.001, parameters=dp_layer.parameters())
+
+ # 4. run layer
+ inputs = paddle.randn([10, 10], 'float32')
+ outputs = dp_layer(inputs)
+ labels = paddle.randn([10, 1], 'float32')
+ loss = loss_fn(outputs, labels)
+
+ loss = dp_layer.scale_loss(loss)
+ loss.backward()
+ dp_layer.apply_collective_grads()
+
+ adam.step()
+ adam.clear_grad()
+
+ if __name__ == '__main__':
+ # 1. start by ``paddle.distributed.spawn`` (default)
+ dist.spawn(train, nprocs=2)
+ # 2. start by ``paddle.distributed.launch``
+ # train()
+
+.. py:method:: scale_loss(loss)
+
+缩放模型损失值 ``loss`` 。在数据并行模式中,损失值 ``loss`` 需要根据并行训练进程的数目进行缩放。
+
+如果不在数据并行模式下,会直接返回原 ``loss`` 。
+
+参数:
+ - **loss** (Variable) - 当前模型的损失值。
+
+返回:缩放后的损失值 ``loss``
+
+返回类型:Variable
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.nn as nn
+ import paddle.optimizer as opt
+ import paddle.distributed as dist
+
+ class LinearNet(nn.Layer):
+ def __init__(self):
+ super(LinearNet, self).__init__()
+ self._linear1 = nn.Linear(10, 10)
+ self._linear2 = nn.Linear(10, 1)
+
+ def forward(self, x):
+ return self._linear2(self._linear1(x))
+
+ def train():
+ # 1. enable dynamic mode
+ paddle.disable_static()
+
+ # 2. initialize parallel environment
+ dist.init_parallel_env()
+
+ # 3. create data parallel layer & optimizer
+ layer = LinearNet()
+ dp_layer = paddle.DataParallel(layer)
+
+ loss_fn = nn.MSELoss()
+ adam = opt.Adam(
+ learning_rate=0.001, parameters=dp_layer.parameters())
+
+ # 4. run layer
+ inputs = paddle.randn([10, 10], 'float32')
+ outputs = dp_layer(inputs)
+ labels = paddle.randn([10, 1], 'float32')
+ loss = loss_fn(outputs, labels)
+
+ loss = dp_layer.scale_loss(loss)
+ loss.backward()
+ dp_layer.apply_collective_grads()
+
+ adam.step()
+ adam.clear_grad()
+
+ if __name__ == '__main__':
+ # 1. start by ``paddle.distributed.spawn`` (default)
+ dist.spawn(train, nprocs=2)
+ # 2. start by ``paddle.distributed.launch``
+ # train()
+
+
+.. py:method:: apply_collective_grads()
+
+AllReduce(规约)参数的梯度值。
+
+返回:无
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.nn as nn
+ import paddle.optimizer as opt
+ import paddle.distributed as dist
+
+ class LinearNet(nn.Layer):
+ def __init__(self):
+ super(LinearNet, self).__init__()
+ self._linear1 = nn.Linear(10, 10)
+ self._linear2 = nn.Linear(10, 1)
+
+ def forward(self, x):
+ return self._linear2(self._linear1(x))
+
+ def train():
+ # 1. enable dynamic mode
+ paddle.disable_static()
+
+ # 2. initialize parallel environment
+ dist.init_parallel_env()
+
+ # 3. create data parallel layer & optimizer
+ layer = LinearNet()
+ dp_layer = paddle.DataParallel(layer)
+
+ loss_fn = nn.MSELoss()
+ adam = opt.Adam(
+ learning_rate=0.001, parameters=dp_layer.parameters())
+
+ # 4. run layer
+ inputs = paddle.randn([10, 10], 'float32')
+ outputs = dp_layer(inputs)
+ labels = paddle.randn([10, 1], 'float32')
+ loss = loss_fn(outputs, labels)
+
+ loss = dp_layer.scale_loss(loss)
+ loss.backward()
+ dp_layer.apply_collective_grads()
+
+ adam.step()
+ adam.clear_grad()
+
+ if __name__ == '__main__':
+ # 1. start by ``paddle.distributed.spawn`` (default)
+ dist.spawn(train, nprocs=2)
+ # 2. start by ``paddle.distributed.launch``
+ # train()
diff --git a/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst b/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ce72d582a0abaad2c4db1f2634049388203f8b51
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/Dropout_cn.rst
@@ -0,0 +1,53 @@
+.. _cn_api_fluid_dygraph_Dropout:
+
+Dropout
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.Dropout(p=0.5, seed=None, dropout_implementation='downgrade_in_infer', is_test=False)
+
+根据给定的丢弃概率,独立地丢弃或保留输入的每个元素。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合:dropout操作符会按丢弃概率随机将一些神经元输出设置为0,其余输出保持不变。
+
+在预测阶段,Dropout层可以被移除,以提高执行效率。
+
+参数:
+ - **p** (float32,可选) - 输入单元的丢弃概率,即输入单元设置为0的概率。默认值:0.5
+ - **seed** (int,可选) - 整型数据,用于创建随机种子。如果该参数设为None,则使用随机种子。注:如果给定一个整型种子,始终丢弃相同的输出单元。训练过程中勿用固定不变的种子。默认值:None。
+ - **dropout_implementation** (str,可选) - 丢弃单元的方式,有 'downgrade_in_infer' 和 'upscale_in_train' 两种选择,默认:'downgrade_in_infer'。具体作用可以参考以下描述。
+
+ 1. downgrade_in_infer(default), 在预测时减小输出结果
+
+ - train: out = input * mask
+
+ - inference: out = input * (1.0 - p)
+
+ (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` )
+
+ 2. upscale_in_train, 在训练时放大输出结果
+
+ - train: out = input * mask / ( 1.0 - p )
+
+ - inference: out = input
+
+ (mask是一个张量,维度和输入维度相同,值为0或1,值为0的比例即为 ``p`` )
+
+ - **is_test** (bool,可选) - 标记是否是测试阶段。此标志仅对静态图模式有效。对于动态图模式,请使用 ``eval()`` 接口。默认:False。
+
+返回:无
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph.base import to_variable
+ import numpy as np
+
+ x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
+ with fluid.dygraph.guard():
+ x = to_variable(x)
+ m = fluid.dygraph.Dropout(p=0.5)
+ droped_train = m(x)
+ # 切换到 eval 模式
+ m.eval()
+ droped_eval = m(x)
+
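+以下为一个简单示意(依据上文给出的计算方式,采用默认的 ``downgrade_in_infer`` ),展示 eval 模式下输出与输入的数值关系:
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+    from paddle.fluid.dygraph.base import to_variable
+
+    x = np.ones((2, 4)).astype('float32')
+    with fluid.dygraph.guard():
+        x = to_variable(x)
+        m = fluid.dygraph.Dropout(p=0.3)  # 默认 dropout_implementation='downgrade_in_infer'
+        m.eval()                          # 预测模式:out = input * (1.0 - p)
+        out = m(x)
+        print(out.numpy())                # 每个元素均约为 0.7
+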
diff --git a/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst b/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst
index d285ace3f0e1a2871e61313714d81f49d95243cb..985bf1c9eb834aa024ebab792a00b151328ef77a 100644
--- a/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Embedding_cn.rst
@@ -5,6 +5,12 @@ Embedding
.. py:class:: paddle.fluid.dygraph.Embedding(size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32')
+:alias_main: paddle.nn.Embedding
+:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
+:old_api: paddle.fluid.dygraph.Embedding
+
+
+
嵌入层(Embedding Layer)
该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例`` 。其根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。
diff --git a/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst
index 0bb670041b2595a4b3fa74565990e66fd696bff4..8a110ba82f47d912428e2e8b6bec45d30e62a739 100644
--- a/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/ExponentialDecay_cn.rst
@@ -3,10 +3,13 @@
ExponentialDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.ExponentialDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供一种学习率按指数函数衰减的功能。
指数衰减的计算方式如下。
diff --git a/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst b/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst
index e0b44fa34f890a6cf159841c9f5e586849c12a19..c2f986da1df15d700808d8f57596f15e0c7c6c6c 100644
--- a/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/GRUUnit_cn.rst
@@ -5,6 +5,9 @@ GRUUnit
.. py:class:: paddle.fluid.dygraph.GRUUnit(name_scope, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False, dtype='float32')
+
+
+
该接口用于构建 ``GRU(Gated Recurrent Unit)`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其用于完成单个时间步内GRU的计算,支持以下两种计算方式:
如果origin_mode为True,则使用的运算公式来自论文
diff --git a/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst
index e1f3aaa149c685a948839b813602deb9b6c1b671..5a619d25a616a3cc8922373c335fad404623ba6d 100644
--- a/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/GroupNorm_cn.rst
@@ -5,6 +5,12 @@ GroupNorm
.. py:class:: paddle.fluid.dygraph.GroupNorm(channels, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', dtype="float32")
+:alias_main: paddle.nn.GroupNorm
+:alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm
+:old_api: paddle.fluid.dygraph.GroupNorm
+
+
+
**Group Normalization层**
该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了组归一化层的功能。更多详情请参考: `Group Normalization `_ 。
diff --git a/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..334fa5109f3ba0f52b1039357e08032671cd1849
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/InstanceNorm_cn.rst
@@ -0,0 +1,51 @@
+.. _cn_api_fluid_dygraph_InstanceNorm:
+
+InstanceNorm
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.InstanceNorm(num_channels, epsilon=1e-05, param_attr=None, bias_attr=None, dtype='float32')
+
+该接口用于构建 ``InstanceNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。
+
+可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下:
+
+NCHW[batch,in_channels,in_height,in_width]
+
+更多详情请参考 : `Instance Normalization: The Missing Ingredient for Fast Stylization `_
+
+``input`` 是mini-batch的输入。
+
+.. math::
+ \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\
+ \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance of each channel in each sample in a batch \\
+ \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\
+ y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift
+
+
+参数:
+ - **num_channels** (int)- 指明输入 ``Tensor`` 的通道数量。
+ - **epsilon** (float,默认1e-05)- 为了在对当前输入做标准化时得到稳定的结果而加在分母上的扰动值。默认值为1e-05。
+ - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时, instance_norm创建相应属性的param_attr(权重)参数。默认:None。
+ - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时, instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。
+ - **dtype** (string,默认float32)- 指明输入 ``Tensor`` 的数据类型,可以为float32或float64。默认:float32。
+
+返回:无
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph.base import to_variable
+ import numpy as np
+ import paddle
+
+ # x's shape is [1, 3, 1, 2]
+ x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
+ with fluid.dygraph.guard():
+ x = to_variable(x)
+ instanceNorm = paddle.nn.InstanceNorm(3)
+ ret = instanceNorm(x)
+ # ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
+ print(ret)
+
diff --git a/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst
index 70bf0b9c37ce9fcb7f20ea448c20fb03f1a2dbe8..c693cc97791290950e90b6d5126edccb7d8ceed4 100644
--- a/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/InverseTimeDecay_cn.rst
@@ -3,10 +3,13 @@
InverseTimeDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.InverseTimeDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供反时限学习率衰减的功能。
反时限学习率衰减计算方式如下。
diff --git a/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b59cfbaf888fe971e6d39a72579848108d57094
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/LambdaDecay_cn.rst
@@ -0,0 +1,65 @@
+.. _cn_api_fluid_dygraph_LambdaDecay:
+
+LambdaDecay
+-------------------------------
+
+
+.. py:class:: paddle.fluid.dygraph.LambdaDecay(learning_rate, lr_lambda)
+
+:api_attr: 命令式编程模式(动态图)
+
+
+该API提供通过 lambda 函数设置学习率的功能。``lr_lambda`` 为一个lambda函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。
+
+算法可以描述为:
+
+.. code-block:: text
+
+ learning_rate = 0.5 # init learning_rate
+ lr_lambda = lambda epoch: 0.95 ** epoch
+
+ learning_rate = 0.5 # epoch 0
+ learning_rate = 0.475 # epoch 1
+ learning_rate = 0.45125 # epoch 2
+
+参数:
+ - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。
+ - **lr_lambda** (function) - ``lr_lambda`` 为一个lambda函数,其通过 ``epoch`` 计算出一个因子,该因子会乘以初始学习率。
+
+返回: 无
+
+**代码示例**:
+
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ with fluid.dygraph.guard():
+ x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+ linear = fluid.dygraph.Linear(10, 10)
+ input = fluid.dygraph.to_variable(x)
+ scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
+ adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
+ for epoch in range(6):
+ for batch_id in range(5):
+ out = linear(input)
+ loss = fluid.layers.reduce_mean(out)
+ adam.minimize(loss)
+ scheduler.epoch()
+ print("epoch:%d, current lr is %f" .format(epoch, adam.current_step_lr()))
+ # epoch:0, current lr is 0.5
+ # epoch:1, current lr is 0.475
+ # epoch:2, current lr is 0.45125
+
+.. py:method:: epoch(epoch=None)
+
+通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。
+
+参数:
+ - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。
+
+返回:
+ 无
+
+**代码示例**:
+
+ 参照上述示例代码。
diff --git a/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst b/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst
index 7186e7df7ccd072d56f960d5ac20c1f8f3dce6c9..0dc2468dff0ca366300ee50558cab2c56df68aad 100644
--- a/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/LayerList_cn.rst
@@ -5,6 +5,9 @@ LayerList
.. py:class:: paddle.fluid.dygraph.LayerList(sublayers=None)
+
+
+
LayerList用于保存子层列表,它包含的子层将被正确地注册和添加。列表中的子层可以像常规python列表一样被索引。
参数:
diff --git a/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst
index 96ef964915564c591d08dc217a7fd588f883d4a3..2fd08fe7ab4ddf0fb1055a49bb96ac70bcb087d5 100644
--- a/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/LayerNorm_cn.rst
@@ -5,6 +5,12 @@ LayerNorm
.. py:class:: paddle.fluid.dygraph.LayerNorm(normalized_shape, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, dtype="float32")
+:alias_main: paddle.nn.LayerNorm
+:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm
+:old_api: paddle.fluid.dygraph.LayerNorm
+
+
+
该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_
计算公式如下
@@ -47,7 +53,7 @@ LayerNorm
x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
- layernorm = fluid.LayerNorm('LayerNorm', begin_norm_axis=1)
- ret = layernorm(x)
+ layerNorm = fluid.LayerNorm([32, 32])
+ ret = layerNorm(x)
diff --git a/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst b/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst
index fcc1bf6fc03b41cb7bbd1095fb754d9b4c115944..ffce7959f30a98d46783db09b3c4f8b0a657777b 100644
--- a/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Layer_cn.rst
@@ -5,6 +5,9 @@ Layer
.. py:class:: paddle.fluid.dygraph.Layer(name_scope=None, dtype=core.VarDesc.VarType.FP32)
+
+
+
基于OOD实现的动态图Layer,包含该Layer的参数、前序运行的结构等信息。
参数:
@@ -13,6 +16,18 @@ Layer
返回:无
+.. py:method:: train()
+
+将此层及其所有子层设置为训练模式。这只会影响某些模块,如Dropout和BatchNorm。
+
+返回:无
+
+.. py:method:: eval()
+
+将此层及其所有子层设置为预测模式。这只会影响某些模块,如Dropout和BatchNorm。
+
+返回:无
+
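+**代码示例**
+
+下面是一个简单示意(以 ``Dropout`` 层为例,说明 ``train()`` / ``eval()`` 切换对随机丢弃行为的影响):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        dropout = fluid.dygraph.Dropout(p=0.5)
+        x = fluid.dygraph.to_variable(np.ones([2, 4]).astype('float32'))
+
+        dropout.train()    # 训练模式:按概率随机丢弃部分输出
+        out_train = dropout(x)
+
+        dropout.eval()     # 预测模式:不再随机丢弃
+        out_eval = dropout(x)
+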
.. py:method:: full_name()
Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__.__name__ 。
@@ -21,6 +36,100 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__
返回类型:str
+.. py:method:: register_forward_pre_hook(hook)
+
+为Layer注册一个 ``forward pre-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之前被调用。
+
+``hook`` 函数具有以下形式:它的 ``input`` 是 ``Layer`` 的 ``input`` ,并且可以返回一个元组或者单个修改值;如果返回单个修改值,则将值包装到一个元组中。用户可以使用该函数来查看或修改 ``Layer`` ``forward`` 函数的输入。
+
+hook(Layer, input) -> None or modified input
+
+参数:
+ - **hook** (function) - 被注册为 ``forward pre-hook`` 的函数
+
+返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。
+
+返回类型: ``HookRemoveHelper`` 类对象
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ # forward_pre_hook函数修改了layer的输入:input = input * 2
+ def forward_pre_hook(layer, input):
+ # 改变输入值
+ input_return = (input[0] * 2)
+ return input_return
+
+ with fluid.dygraph.guard():
+ linear = fluid.Linear(13, 5, dtype="float32")
+
+ # 注册hook
+ forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook)
+
+ value0 = np.arange(26).reshape(2, 13).astype("float32")
+ in0 = fluid.dygraph.to_variable(value0)
+ out0 = linear(in0)
+
+ # 移除hook
+ forward_pre_hook_handle.remove()
+
+ value1 = value0 * 2
+ in1 = fluid.dygraph.to_variable(value1)
+ out1 = linear(in1)
+
+ # hook改变了layer的输入(input = input * 2),所以out0等于out1
+ assert (out0.numpy() == out1.numpy()).any()
+
+.. py:method:: register_forward_post_hook(hook)
+
+为Layer注册一个 ``forward post-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之后被调用。
+
+``hook`` 函数具有以下形式,它的 ``input`` 和 ``output`` 是 ``Layer`` 的 ``input`` 和 ``output`` 。用户可以用该函数来查看和修改 ``Layer`` ``forward`` 函数的输出。
+
+hook(Layer, input, output) -> None or modified output
+
+参数:
+ - **hook** (function) - 被注册为 ``forward post-hook`` 的函数
+
+返回:一个 ``HookRemoveHelper`` 类对象,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。
+
+返回类型: ``HookRemoveHelper`` 类对象
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ # forward_post_hook函数改变了layer的输出:output = output * 2
+ def forward_post_hook(layer, input, output):
+ # 改变输出值
+ return output * 2
+
+ with fluid.dygraph.guard():
+ linear = fluid.Linear(13, 5, dtype="float32")
+
+ # 注册hook
+ forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook)
+
+ value1 = np.arange(26).reshape(2, 13).astype("float32")
+ in1 = fluid.dygraph.to_variable(value1)
+
+ out0 = linear(in1)
+
+ # remove the hook
+ forward_post_hook_handle.remove()
+
+ out1 = linear(in1)
+
+ # hook改变了layer的输出(output = output * 2),所以out0等于out1 * 2
+ assert (out0.numpy() == (out1.numpy()) * 2).any()
+
.. py:method:: create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None)
为Layer创建参数。
@@ -147,6 +256,87 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__
for prefix, layer in model.named_sublayers():
print(prefix, layer)
+.. py:method:: register_buffer(name, variable, persistable=True)
+
+将一个Variable注册为buffer。
+
+buffer是一个非参数类型的变量,不会被优化器更新,但在评估或预测阶段可能是必要的状态变量。比如 ``BatchNorm`` 中的均值和方差。
+
+注册的buffer默认是可持久性的,会被保存到 ``state_dict`` 中。如果指定 ``persistable`` 参数为False,则会注册一个非持久性的buffer,即不会同步和保存到 ``state_dict`` 中。
+
+参数:
+ - **name** (str) - 注册buffer的名字。可以通过此名字来访问已注册的buffer。
+ - **variable** (Variable) - 将被注册为buffer的变量。
+ - **persistable** (bool, 可选) - 注册的buffer是否需要可持久性地保存到 ``state_dict`` 中。
+
+返回:None
+
+返回类型:None
+
+**代码示例**
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+
+ with fluid.dygraph.guard():
+ linear = fluid.Linear(10, 3)
+ value = np.array([0]).astype("float32")
+ buffer = fluid.dygraph.to_variable(value)
+ linear.register_buffer("buf_name", buffer, persistable=True)
+
+ # get the buffer by attribute.
+ print(linear.buf_name)
+
+.. py:method:: buffers(include_sublayers=True)
+
+返回一个由当前层及其子层的所有buffers组成的列表。
+
+参数:
+ - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。
+
+返回:一个由当前层及其子层的所有buffers组成的列表,列表中的元素类型为Variable。
+
+返回类型:list
+
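+**代码示例**
+
+下面是一个简单示意(沿用上文 ``register_buffer`` 的用法,展示如何获取当前层的全部buffers):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        linear = fluid.Linear(10, 3)
+        value = np.array([0]).astype("float32")
+        linear.register_buffer("buf_name", fluid.dygraph.to_variable(value))
+
+        # buffers() 返回当前层及其子层所有buffers组成的列表
+        print(linear.buffers())
+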
+.. py:method:: named_buffers(prefix='', include_sublayers=True)
+
+返回层中所有buffers的迭代器,生成名称和buffer的元组。
+
+参数:
+ - **prefix** (str, 可选) - 在所有buffer名称前加的前缀。默认值:''。
+ - **include_sublayers** (bool, 可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。
+
+返回:产出名称和buffer的元组的迭代器。
+
+返回类型:iterator
+
+**代码示例**
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+
+ with fluid.dygraph.guard():
+ fc1 = fluid.Linear(10, 3)
+ buffer1 = fluid.dygraph.to_variable(np.array([0]).astype("float32"))
+ # register a variable as buffer by specific `persistable`
+ fc1.register_buffer("buf_name_1", buffer1, persistable=True)
+
+ fc2 = fluid.Linear(3, 10)
+ buffer2 = fluid.dygraph.to_variable(np.array([1]).astype("float32"))
+ # register a buffer by assigning an attribute with Variable.
+ # The `persistable` can only be False by this way.
+ fc2.buf_name_2 = buffer2
+
+ model = fluid.dygraph.Sequential(fc1, fc2)
+
+ # get all named buffers
+ for name, buffer in model.named_buffers():
+ print(name, buffer)
+
.. py:method:: forward(*inputs, **kwargs)
定义每次调用时执行的计算。应该被所有子类覆盖。
@@ -181,13 +371,13 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__
.. py:method:: state_dict(destination=None, include_sublayers=True)
-获取当前层及其子层的所有参数。并将所有参数存放在dict结构中。
+获取当前层及其子层的所有参数和可持久性buffers。并将所有参数和buffers存放在dict结构中。
参数:
- - **destination** (dict, 可选) - 如果提供 ``destination`` ,则所有参数都将存放在 ``destination`` 中。 默认值:None。
- - **include_sublayers** (bool, 可选) - 如果设置为True,则包括子层的参数。默认值:True。
+ - **destination** (dict, 可选) - 如果提供 ``destination`` ,则所有参数和可持久性buffers都将存放在 ``destination`` 中。 默认值:None。
+ - **include_sublayers** (bool, 可选) - 如果设置为True,则包括子层的参数和buffers。默认值:True。
-返回:包含所有参数的dict
+返回:包含所有参数和可持久性buffers的dict
返回类型:dict
@@ -203,11 +393,11 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__
.. py:method:: set_dict(stat_dict, include_sublayers=True)
-根据传入的 ``stat_dict`` 设置参数。 所有参数将由 ``stat_dict`` 中的 ``Tensor`` 设置。
+根据传入的 ``stat_dict`` 设置参数和可持久性buffers。 所有参数和buffers将由 ``stat_dict`` 中的 ``Tensor`` 设置。
参数:
- - **state_dict** (dict) - 包含所有参数的dict。
- - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数。 默认值:True。
+ - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。
+ - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数和buffers。 默认值:True。
返回:None
@@ -228,11 +418,11 @@ Layer的全名。组成方式为: ``name_scope`` + “/” + MyLayer.__class__
.. warning::
该函数将被弃用。请使用set_dict函数。
-根据传入的 ``stat_dict`` 设置参数。 所有参数将由 ``stat_dict`` 中的 ``Tensor`` 设置。
+根据传入的 ``stat_dict`` 设置参数和可持久性buffers。 所有参数和buffers将由 ``stat_dict`` 中的 ``Tensor`` 设置。
参数:
- - **state_dict** (dict) - 包含所有参数的dict。
- - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数。 默认值:True。
+ - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。
+ - **include_sublayers** (bool, 可选) - 如果设置为True,则还包括子层的参数和buffers。 默认值:True。
返回:None
diff --git a/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst b/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst
index 848484d4f77adedf5d72c3e047fc38bf6b9c64df..c741deb393c1eda09d4c8daa1521139c564e8ce9 100644
--- a/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Linear_cn.rst
@@ -5,6 +5,12 @@ Linear
.. py:class:: paddle.fluid.dygraph.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype='float32')
+:alias_main: paddle.nn.Linear
+:alias: paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear
+:old_api: paddle.fluid.dygraph.Linear
+
+
+
**线性变换层:**
diff --git a/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..896f90066ca8463f34bee8005b0f46bd4fd68a25
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/MultiStepDecay_cn.rst
@@ -0,0 +1,72 @@
+.. _cn_api_fluid_dygraph_MultiStepDecay:
+
+MultiStepDecay
+-------------------------------
+
+
+.. py:class:: paddle.fluid.dygraph.MultiStepDecay(learning_rate, milestones, decay_rate=0.1)
+
+:api_attr: 命令式编程模式(动态图)
+
+
+该接口提供 ``MultiStep`` 衰减学习率的功能。
+
+算法可以描述为:
+
+.. code-block:: text
+
+ learning_rate = 0.5
+ milestones = [30, 50]
+ decay_rate = 0.1
+ if epoch < 30:
+ learning_rate = 0.5
+ elif epoch < 50:
+ learning_rate = 0.05
+ else:
+ learning_rate = 0.005
+
+参数:
+ - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。
+ - **milestones** (tuple|list) - 列表或元组。必须是递增的。
+ - **decay_rate** (float,可选) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。
+
+返回: 无
+
+**代码示例**:
+
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ with fluid.dygraph.guard():
+ x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+ linear = fluid.dygraph.Linear(10, 10)
+ input = fluid.dygraph.to_variable(x)
+ scheduler = fluid.dygraph.MultiStepDecay(0.5, milestones=[3, 5])
+ adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
+ for epoch in range(6):
+ for batch_id in range(5):
+ out = linear(input)
+ loss = fluid.layers.reduce_mean(out)
+ adam.minimize(loss)
+ scheduler.epoch()
+ print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr()))
+ # epoch:0, current lr is 0.5
+ # epoch:1, current lr is 0.5
+ # epoch:2, current lr is 0.5
+ # epoch:3, current lr is 0.05
+ # epoch:4, current lr is 0.05
+ # epoch:5, current lr is 0.005
+
+.. py:method:: epoch(epoch=None)
+
+通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。
+
+参数:
+ - **epoch** (int|float,可选) - 类型:int或float。指定当前的epoch数。默认:无,此时将会自动累计epoch数。
+
+返回:
+ 无
+
+**代码示例**:
+
+ 参照上述示例代码。
diff --git a/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst b/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst
index 63bb80d14525c460efcfd91c52a59e687bf722b5..45302572ed5bfb377763073775d19cbed4310079 100644
--- a/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/NCE_cn.rst
@@ -5,6 +5,9 @@ NCE
.. py:class:: paddle.fluid.dygraph.NCE(num_total_classes, dim, param_attr=None, bias_attr=None, num_neg_samples=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False, dtype="float32")
+
+
+
该接口用于构建 ``NCE`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``NCE`` 损失函数的功能,其默认使用均匀分布进行抽样,计算并返回噪音对比估计( noise-contrastive estimation training loss)。更多详情请参考:`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models `_
参数:
@@ -42,7 +45,6 @@ NCE
words.append(fluid.dygraph.base.to_variable(inp_word[i]))
emb = fluid.Embedding(
- 'embedding',
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
@@ -57,17 +59,17 @@ NCE
embs3 = fluid.layers.concat(input=embs3, axis=1)
nce = fluid.NCE(
- num_total_classes=dict_size,
- dim=embs3.shape[1],
- num_neg_samples=2,
- sampler="custom_dist",
- custom_dist=nid_freq_arr.tolist(),
- seed=1,
- param_attr='nce.w',
- bias_attr='nce.b')
+ num_total_classes=dict_size,
+ dim=embs3.shape[1],
+ num_neg_samples=2,
+ sampler="custom_dist",
+ custom_dist=nid_freq_arr.tolist(),
+ seed=1,
+ param_attr='nce.w',
+ bias_attr='nce.b')
wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
- nce_loss3 = nce(embs3, words[label_word])
+ nce_loss3 = nce(embs3, wl)
属性
::::::::::::
diff --git a/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst
index b2ad39a8a3f566becda4e8ddf6db206ed89a4da0..03a7e10a1cc4b66180ff118d8408ce21d9f5a30a 100644
--- a/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/NaturalExpDecay_cn.rst
@@ -3,10 +3,13 @@
NaturalExpDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.NaturalExpDecay(learning_rate, decay_steps, decay_rate, staircase=False, begin=0, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供按自然指数衰减学习率的功能。
自然指数衰减的计算方式如下。
@@ -39,7 +42,7 @@ NaturalExpDecay
- **staircase** (bool,可选) - 若为True, 学习率变化曲线呈阶梯状,若为False,学习率变化值曲线为平滑的曲线。默认值为False。
- **begin** (int,可选) – 起始步,即以上运算式子中global_step的初始化值。默认值为0。
- **step** (int,可选) – 步大小,即以上运算式子中global_step的每次的增量值。默认值为1。
- - **dtype** – (str,可选) 初始化学习率变量的数据类型,可以为"float32", "float64"。默认值为"float32"。
+ - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。
返回: 无
@@ -50,12 +53,14 @@ NaturalExpDecay
import paddle.fluid as fluid
base_lr = 0.1
with fluid.dygraph.guard():
+ emb = fluid.dygraph.Embedding([10, 10])
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
- learning_rate=base_lr,
- decay_steps=10000,
- decay_rate=0.5,
- staircase=True))
+ learning_rate=base_lr,
+ decay_steps=10000,
+ decay_rate=0.5,
+ staircase=True),
+ parameter_list=emb.parameters())
diff --git a/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst
index 1b58aefb9ce6788bdeca195cc3479647a56da846..390e8b3e3f5a7ea1dac71fbd75468d26b042b4c6 100644
--- a/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/NoamDecay_cn.rst
@@ -3,9 +3,12 @@
NoamDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
-.. py:class:: paddle.fluid.dygraph.NoamDecay(d_model, warmup_steps, begin=1, step=1, dtype='float32')
+.. py:class:: paddle.fluid.dygraph.NoamDecay(d_model, warmup_steps, begin=1, step=1, dtype='float32', learning_rate=1.0)
+
+:api_attr: 命令式编程模式(动态图)
+
+
该接口提供Noam衰减学习率的功能。
@@ -13,7 +16,7 @@ Noam衰减的计算方式如下。
.. math::
- decayed\_learning\_rate = d_{model}^{-0.5} * min(global\_steps^{-0.5}, global\_steps * warmup\_steps^{-1.5})
+ decayed\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(global\_steps^{-0.5}, global\_steps * warmup\_steps^{-1.5})
关于Noam衰减的更多细节请参考 `attention is all you need `_
@@ -28,6 +31,7 @@ Noam衰减的计算方式如下。
- **begin** (int,可选) – 起始步。即以上运算式子中global_steps的初始值。默认值为0。
- **step** (int,可选) – 步大小。即以上运算式子中global_steps的递增值。默认值为1。
- **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认值为"float32"。
+ - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。
返回: 无
@@ -39,7 +43,9 @@ Noam衰减的计算方式如下。
warmup_steps = 100
learning_rate = 0.01
with fluid.dygraph.guard():
+ emb = fluid.dygraph.Embedding([10, 10])
optimizer = fluid.optimizer.SGD(
learning_rate = fluid.dygraph.NoamDecay(
1/(warmup_steps *(learning_rate ** 2)),
- warmup_steps) )
+ warmup_steps),
+ parameter_list = emb.parameters())
diff --git a/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst b/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst
index 57ac3449cd5fd4bde7b1f4638759d6c93c3557d9..8252bee477151d00696254cbe2ae4ea8dda01261 100644
--- a/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/PRelu_cn.rst
@@ -5,6 +5,9 @@ PRelu
.. py:class:: paddle.fluid.dygraph.PRelu(mode, input_shape=None, param_attr=None, dtype="float32")
+
+
+
该接口用于构建 ``PRelu`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了 ``PRelu`` 激活函数的三种激活方式。
计算公式如下:
diff --git a/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..846001c6e98239282f6a971d82a174d3e32068c5
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/ParallelEnv_cn.rst
@@ -0,0 +1,145 @@
+.. _cn_api_fluid_dygraph_ParallelEnv:
+
+ParallelEnv
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.ParallelEnv()
+
+**注意:**
+ **这个类的曾用名为 Env, 这个旧的名字会被废弃,请使用新的类名 ParallelEnv。**
+
+这个类用于获取动态图模型并行执行所需的环境变量值。
+
+动态图并行模式现在需要使用 `paddle.distributed.launch` 模块启动,所需的环境变量默认由 `paddle.distributed.launch` 模块自动配置。
+
+ParallelEnv通常需要和 `fluid.dygraph.DataParallel` 一起使用,用于配置动态图并行执行。
+
+**示例代码:**
+ .. code-block:: python
+
+ # 这个示例需要由paddle.distributed.launch启动, 用法为:
+ # python -m paddle.distributed.launch --selected_gpus=0,1 example.py
+ # 脚本example.py中的代码是下面这个示例.
+
+ import numpy as np
+ import paddle.fluid as fluid
+ import paddle.fluid.dygraph as dygraph
+ from paddle.fluid.optimizer import AdamOptimizer
+ from paddle.fluid.dygraph.nn import Linear
+ from paddle.fluid.dygraph.base import to_variable
+
+ place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id)
+ with fluid.dygraph.guard(place=place):
+
+ # 准备数据并行的环境
+ strategy=dygraph.prepare_context()
+
+ linear = Linear(1, 10, act="softmax")
+ adam = fluid.optimizer.AdamOptimizer()
+
+ # 配置模型为并行模型
+ linear = dygraph.DataParallel(linear, strategy)
+
+ x_data = np.random.random(size=[10, 1]).astype(np.float32)
+ data = to_variable(x_data)
+
+ hidden = linear(data)
+ avg_loss = fluid.layers.mean(hidden)
+
+ # 根据参与训练GPU卡的数量对loss值进行缩放
+ avg_loss = linear.scale_loss(avg_loss)
+
+ avg_loss.backward()
+
+ # 收集各个GPU卡上的梯度值
+ linear.apply_collective_grads()
+
+ adam.minimize(avg_loss)
+ linear.clear_gradients()
+
+属性
+::::::::::::
+
+.. py:attribute:: nranks
+
+参与训练进程的数量,一般也是训练所使用GPU卡的数量。
+
+此属性的值等于环境变量 `PADDLE_TRAINERS_NUM` 的值。默认值为1。
+
+**示例代码**
+ .. code-block:: python
+
+ # 在Linux环境,提前执行此命令: export PADDLE_TRAINERS_NUM=4
+ import paddle.fluid as fluid
+
+ env = fluid.dygraph.ParallelEnv()
+ print("The nranks is %d" % env.nranks)
+ # The nranks is 4
+
+
+.. py:attribute:: local_rank
+
+当前训练进程的编号。
+
+此属性的值等于环境变量 `PADDLE_TRAINER_ID` 的值。默认值是0。
+
+**示例代码**
+ .. code-block:: python
+
+ # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ID=0
+ import paddle.fluid as fluid
+
+ env = fluid.dygraph.ParallelEnv()
+ print("The local rank is %d" % env.local_rank)
+ # The local rank is 0
+
+
+.. py:attribute:: dev_id
+
+当前用于并行训练的GPU的编号。
+
+此属性的值等于环境变量 `FLAGS_selected_gpus` 的值。默认值是0。
+
+**示例代码**
+ .. code-block:: python
+
+ # 在Linux环境,提前执行此命令: export FLAGS_selected_gpus=1
+ import paddle.fluid as fluid
+
+ env = fluid.dygraph.ParallelEnv()
+ print("The device id are %d" % env.dev_id)
+ # The device id are 1
+
+
+.. py:attribute:: current_endpoint
+
+当前训练进程的终端节点IP与相应端口,形式为(机器节点IP:端口号)。例如:127.0.0.1:6170。
+
+此属性的值等于环境变量 `PADDLE_CURRENT_ENDPOINT` 的值。默认值为空字符串""。
+
+**示例代码**
+ .. code-block:: python
+
+ # 在Linux环境,提前执行此命令: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170
+ import paddle.fluid as fluid
+
+ env = fluid.dygraph.ParallelEnv()
+ print("The current endpoint are %s" % env.current_endpoint)
+ # The current endpoint are 127.0.0.1:6170
+
+
+.. py:attribute:: trainer_endpoints
+
+当前任务所有参与训练进程的终端节点IP与相应端口,用于在NCCL2初始化的时候建立通信,广播NCCL ID。
+
+此属性的值等于环境变量 `PADDLE_TRAINER_ENDPOINTS` 的值。默认值为空字符串""。
+
+**示例代码**
+ .. code-block:: python
+
+ # 在Linux环境,提前执行此命令: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171
+ import paddle.fluid as fluid
+
+ env = fluid.dygraph.ParallelEnv()
+ print("The trainer endpoints are %s" % env.trainer_endpoints)
+ # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171']
\ No newline at end of file
diff --git a/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst b/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst
index 6d3c29ebf836a900e14f2c5625e7ad2263d52252..82ca04fef6bde4a149153642c29b0e449931da9e 100644
--- a/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/ParameterList_cn.rst
@@ -5,6 +5,9 @@ ParameterList
.. py:class:: paddle.fluid.dygraph.ParameterList(parameters=None)
+
+
+
参数列表容器。此容器的行为类似于Python列表,但它包含的参数将被正确地注册和添加。
参数:
diff --git a/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst
index 74478aabded2640d36e461cfe7075bc4435dc137..d483d5d34ed9a271f1b61f1ed97b8c09ab2626f2 100644
--- a/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/PiecewiseDecay_cn.rst
@@ -3,10 +3,13 @@
PiecewiseDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.PiecewiseDecay(boundaries, values, begin, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供对初始学习率进行分段(piecewise)常数衰减的功能。
分段常数衰减的过程举例描述如下。
@@ -35,8 +38,10 @@ PiecewiseDecay
boundaries = [10000, 20000]
values = [1.0, 0.5, 0.1]
with fluid.dygraph.guard():
+ emb = fluid.dygraph.Embedding( [10, 10] )
optimizer = fluid.optimizer.SGD(
- learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0) )
+ learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0),
+ parameter_list = emb.parameters() )
diff --git a/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst
index 0a2a989688719208d6d783918fb8e441448185b8..df03f3f10ef90733cf7fc2027efc2028018da1b0 100644
--- a/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/PolynomialDecay_cn.rst
@@ -3,10 +3,13 @@
PolynomialDecay
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.PolynomialDecay(learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, begin=0, step=1, dtype='float32')
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口提供学习率按多项式衰减的功能。通过多项式衰减函数,使得学习率值逐步从初始的 ``learning_rate``,衰减到 ``end_learning_rate`` 。
计算方式如下。
@@ -52,10 +55,8 @@ PolynomialDecay
total_step = 5000
end_lr = 0
with fluid.dygraph.guard():
+ emb = fluid.dygraph.Embedding( [10, 10])
optimizer = fluid.optimizer.SGD(
learning_rate = fluid.dygraph.PolynomialDecay(
- start_lr, total_step, end_lr, power=1.0) )
-
-
-
-
+ start_lr, total_step, end_lr, power=1.0),
+ parameter_list = emb.parameters())
diff --git a/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst b/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst
index 0869d810c1906d479dc43bd8231e810bc6a810fc..e66ec6b3237edbe73446be147aef39efe3cb66a8 100644
--- a/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Pool2D_cn.rst
@@ -3,11 +3,17 @@
Pool2D
-------------------------------
-.. py:class:: paddle.fluid.dygraph.Pool2D(pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, exclusive=True)
+.. py:class:: paddle.fluid.dygraph.Pool2D(pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, exclusive=True, data_format="NCHW")
+
+:alias_main: paddle.nn.Pool2D
+:alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
+:old_api: paddle.fluid.dygraph.Pool2D
+
+
该接口用于构建 ``Pool2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维池化层,并使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` , 步长 ``pool_stride`` ,填充 ``pool_padding`` 这些参数得到输出。
-输入X和输出Out是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数( ``ksize``, ``strides``, ``paddings`` )含有两个整型元素。分别表示高度和宽度上的参数。输入X的大小和输出Out的大小可能不一致。
+输入X和输出Out默认是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数( ``ksize``, ``strides``, ``paddings`` )含有两个整型元素。分别表示高度和宽度上的参数。输入X的大小和输出Out的大小可能不一致。
例如:
@@ -60,13 +66,15 @@ Pool2D
- **use_cudnn** (bool, 可选)- 是否用cudnn核,只有已安装cudnn库时才有效。默认True。
- **ceil_mode** (bool, 可选)- 是否用ceil函数计算输出高度和宽度。如果设为False,则使用floor函数。默认为False。
- **exclusive** (bool, 可选) - 是否在平均池化模式忽略填充值。默认为True。
+ - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。
返回:无
抛出异常:
- - ``ValueError`` - 如果 ``pool_type`` 既不是“max”也不是“avg”
- - ``ValueError`` - 如果 ``global_pooling`` 为False并且‘pool_size’为-1
- - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值
+ - ``ValueError`` - 如果 ``pool_type`` 既不是“max”也不是“avg”。
+ - ``ValueError`` - 如果 ``global_pooling`` 为False并且 ``pool_size`` 为-1。
+ - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。
+ - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。
**代码示例**
@@ -77,11 +85,11 @@ Pool2D
import numpy as np
with fluid.dygraph.guard():
- data = np.random.random((3, 32, 32, 5)).astype('float32')
- pool2d = fluid.dygraph.Pool2D(pool_size=2,
+ data = np.random.random((3, 32, 32, 5)).astype('float32')
+ pool2d = fluid.dygraph.Pool2D(pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
- pool2d_res = pool2d(to_variable(data))
+ pool2d_res = pool2d(to_variable(data))
diff --git a/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst b/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75cd816fbdeb794fbfb8efd8ff471c16d6812875
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/ProgramTranslator_cn.rst
@@ -0,0 +1,263 @@
+.. _cn_api_fluid_dygraph_ProgramTranslator:
+
+ProgramTranslator
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.dygraph_to_static.ProgramTranslator()
+
+将动态图函数转为静态图函数的类。该类是个单例(singleton)。
+
+参数:
+ 无。
+
+返回:ProgramTranslator 单例对象。
+
+返回类型:ProgramTranslator。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ # 以下两种调用方法得到同一个对象,因为ProgramTranslator是个单例
+ fluid.dygraph.ProgramTranslator()
+ fluid.dygraph.ProgramTranslator.get_instance()
+
+.. py:method:: enable(enable_declarative)
+
+全局开启或关闭动态图转化为静态图。
+
+参数:
+ - **enable_declarative** (bool) - 设置为True或者False,以开启或关闭declarative功能。
+
+返回:None。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ @fluid.dygraph.jit.declarative
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) > 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+ prog_trans.enable(False)
+
+ x = np.ones([1, 2])
+ # The declarative is disabled so the func is run in dygraph
+ with fluid.dygraph.guard():
+ print(func(x).numpy()) # [[0. 0.]]
+
+.. py:method:: get_output(dygraph_func, *args, **kwargs)
+
+返回动态图函数输出的VarBase,但是该动态图函数的数值计算过程会被转化为静态图模式运行。
+
+参数:
+ - **dygraph_func** (callable) - 动态图函数。
+ - **args, kwargs** - 动态图函数的输入。
+
+返回:包含数值结果的VarBase或者VarBase的元组,是输入动态图函数的返回值。
+
+返回类型:VarBase或者VarBase的元组。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) > 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+
+ with fluid.dygraph.guard():
+ x = np.ones([1, 2])
+ x_v = prog_trans.get_output(func, x)
+ print(x_v.numpy()) # [[0. 0.]]
+
+.. py:method:: get_func(dygraph_func)
+
+返回一个可调用函数,该函数将输入的动态图函数接口转化为静态图组网接口。与动态图接口不同,组网接口并不直接返回数据结果,用户需要自行处理对应的Program和Executor。
+
+参数:
+ - **dygraph_func** (callable) - 动态图函数。
+
+返回:将动态图接口转为静态图组网接口的可调用函数。
+
+返回类型:可调用函数。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) > 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+
+ static_func = prog_trans.get_func(func)
+ print(callable(static_func)) # True
+
+.. py:method:: get_program(dygraph_func, *args, **kwargs)
+
+返回动态图函数转化后的静态图Program和输入输出Variable。用户可以使用Executor来执行该Program。
+
+参数:
+ - **dygraph_func** (callable) - 动态图函数。
+ - **args, kwargs** - 动态图函数的输入。
+
+返回:元组(main_program, startup_program, inputs, outputs)
+ main_program: 转化后的main program。
+ startup_program: 转化后的startup program。
+ inputs: 输入Variable的列表,这些Variable可以在执行时被feed。
+ outputs: 输出Variable的列表,这些Variable可以在运行时被fetch。
+
+返回类型:类型为(Program, Program, list(Variable), list(Variable)) 的元组。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) > 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+
+ x = np.ones([1, 2])
+ main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
+ print([i.name for i in inputs])
+ # ['feed_0'] 需要被feed的输入Variable名字,对应x
+ print([o.name for o in outputs])
+ # ['_generated_var_4'] 需要被fetch的输出Variable名字,对应x_v
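+
+作为补充,下面给出一个用Executor执行上述Program的简要示意(承接上例,仅为示意写法,feed的数据与键名沿用上例中的 ``x`` 与 ``inputs``):
+
+.. code-block:: python
+
+    # 示意:执行get_program返回的静态图Program(承接上例,main_prog等变量已定义)
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(start_prog)                          # 先运行startup program完成初始化
+    results = exe.run(main_prog,
+                      feed={inputs[0].name: x},  # 以输入Variable的名字作为feed的键
+                      fetch_list=outputs)        # fetch转化后函数的输出
+    print(results[0])                            # 预期与get_output示例的数值结果一致,即[[0. 0.]]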
+
+.. py:method:: get_code(dygraph_func)
+
+返回动态图函数转化后的静态图代码字符串。
+
+参数:
+ - **dygraph_func** (callable) - 动态图函数。
+
+返回:转化后的静态图代码字符串。
+
+返回类型:str。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) > 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+
+ code = prog_trans.get_code(func)
+ print(type(code)) # <class 'str'>
+
+.. py:method:: save_inference_model(dirname, feed=None, fetch=None)
+
+将现有模型保存为预测模型。保存过程会裁剪main program,只保存和预测输入输出有关的部分,构建成新的Program,并将此Program和相关参数保存到指定dirname路径下,被保存的模型可以被 :ref:`cn_api_fluid_io_load_inference_model` 或者C++预测接口使用。
+
+参数:
+ - **dirname** (str) - 存储预测模型的目录。
+ - **feed** (list[int], 可选) - 预测模型要保存的输入Variable的序号。如果为None,则动态图函数的所有输入变量将被保存。默认值为None。
+ - **fetch** (list[int], 可选) - 预测模型要保存的输出Variable的序号。如果为None,则动态图函数的所有输出变量将被保存。默认值为None。
+
+返回:None。
+
+**示例代码**
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ from paddle.fluid.dygraph import ProgramTranslator
+
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ loss = fluid.layers.mean(z)
+ return z, loss
+
+ with fluid.dygraph.guard(fluid.CPUPlace()):
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ loss, out = net(x)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ # 保存模型
+ # 注意fetch=[0]意味着我们将序号为0的动态图return输出'z'作为预测输出
+ prog_trans = ProgramTranslator()
+ prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0])
+
+ # 在这个例子中,预测模型会根据输出'z'进行裁剪。裁剪后的Program会被保存在
+ # "./dy2stat_infer_model" 目录下,相关参数也会以多个文件的形式保存在该目录下。
+
+.. py:method:: get_program_cache()
+
+返回ProgramCache单例。这个方法是PaddlePaddle开发者用来管理ProgramTranslator中的Program缓存,普通用户不需要使用这个方法。
+
+返回:ProgramTranslator中的ProgramCache。
+
+返回类型:ProgramCache。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ prog_trans = fluid.dygraph.ProgramTranslator()
+ prog_cache = prog_trans.get_program_cache()
+
diff --git a/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst b/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b0a0b75f7b31244421f02cab719a342461a9f7c1
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/ReduceLROnPlateau_cn.rst
@@ -0,0 +1,89 @@
+.. _cn_api_fluid_dygraph_ReduceLROnPlateau:
+
+ReduceLROnPlateau
+-------------------------------
+
+**注意:该API仅支持【动态图】模式**
+
+.. py:class:: paddle.fluid.dygraph.ReduceLROnPlateau(learning_rate, mode='min', decay_rate=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8, dtype='float32')
+
+该API为 ``loss`` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率(如果将 ``mode`` 设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升时降低学习率)。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。
+
+``loss`` 是传入到该类方法 ``step`` 中的参数,其必须是shape为[1]的1-D Tensor。 如果 ``loss`` 停止下降(``mode`` 为 `min` 时)超过 ``patience`` 个epoch,学习率将会减小为
+`learning_rate * decay_rate` 。
+
+此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个epoch的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。
+在冷静期之后,会继续监控 ``loss`` 的上升或下降。
+
+参数:
+ - **learning_rate** (Variable|float|int) - 初始学习率。其类型可以是Python的float类型,如果输入int类型则会被转为float类型。其也可以是shape为[1]的
+ 1-D Tensor,且相应数据类型必须为 "float32" 或 "float64" 。
+ - **mode** (str,可选) - `'min'` 和 `'max'` 之一。通常情况下,为 `'min'` ,此时当 ``loss`` 停止下降时学习率将减小。默认:`'min'` 。
+ (注意:仅在特殊用法时,可以将其设置为 `'max'` ,此时判断逻辑相反, ``loss`` 停止上升时学习率才减小)
+ - **decay_rate** (float,可选) - 学习率衰减的比例。`new_lr = origin_lr * decay_rate` ,它是值小于1.0的float型数字,默认: 0.1。
+ - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个epoch没有下降(mode: 'min')或上升(mode: 'max')时,学习率才会减小。默认:10。
+ - **verbose** (bool,可选) - 如果为 ``True`` , 会在每次更新optimizer中的learning_rate时,打印信息。默认:``False`` 。
+ - **threshold** (float,可选) - ``threshold`` 和 ``threshold_mode`` 两个参数将会决定 ``loss`` 最小变化的阈值。小于该阈值的变化
+ 将会被忽视。默认:1e-4。
+ - **threshold_mode** (str,可选) - `'rel'` 和 `'abs'` 之一。在 `'rel'` 模式下, ``loss`` 最小变化的阈值是 `last_loss * threshold` ,
+ 其中 ``last_loss`` 是 ``loss`` 在上个epoch的值。在 `'abs'` 模式下,``loss`` 最小变化的阈值是 `threshold` 。 默认:`'rel'`。
+ - **cooldown** (int,可选) - 在学习速率每次减小之后,会进入时长为 ``cooldown`` 个epoch的冷静期。默认:0。
+ - **min_lr** (float,可选) - 最小的学习率。减小后的学习率最低下界限。默认:0。
+ - **eps** (float,可选) - 如果新旧学习率间的差异小于 ``eps`` ,则不会更新。默认值:1e-8。
+ - **dtype** (str,可选) – 学习率值的数据类型,可以为"float32", "float64"。默认:"float32"。
+
+返回: ``loss`` 自适应的学习率
+
+返回类型:Variable
+
+**代码示例**:
+
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ with fluid.dygraph.guard():
+ x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+ linear = fluid.dygraph.Linear(10, 10)
+ input = fluid.dygraph.to_variable(x)
+
+ reduce_lr = fluid.dygraph.ReduceLROnPlateau(
+ learning_rate = 1.0,
+ decay_rate = 0.5,
+ patience = 5,
+ verbose = True,
+ cooldown = 3)
+ adam = fluid.optimizer.Adam(
+ learning_rate = reduce_lr,
+ parameter_list = linear.parameters())
+
+ for epoch in range(10):
+ total_loss = 0
+ for batch_id in range(5):
+ out = linear(input)
+ loss = fluid.layers.reduce_mean(out)
+ total_loss += loss
+ adam.minimize(loss)
+
+ avg_loss = total_loss/5
+
+ # 根据传入的avg_loss,调整学习率
+ reduce_lr.step(avg_loss)
+ lr = adam.current_step_lr()
+ print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr))
+
+
+
+.. py:method:: step(loss)
+
+需要在每个epoch调用该方法,其根据传入的 ``loss`` 调整optimizer中的学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。
+
+参数:
+ - **loss** (Variable) - shape为[1]的1-D Tensor,将被用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个epoch没有下降,将会降低学习率。
+
+返回:
+ 无
+
+**代码示例**:
+
+ 参照其类中的说明。
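+
+ 此外,下面给出一个单独创建scheduler并在每个epoch末调用 ``step`` 的简化示意:
+
+ .. code-block:: python
+
+    # 简化示意:单独创建scheduler,便于之后调用其step方法
+    import numpy as np
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        linear = fluid.dygraph.Linear(10, 10)
+        input = fluid.dygraph.to_variable(
+            np.random.uniform(-1, 1, [10, 10]).astype("float32"))
+        reduce_lr = fluid.dygraph.ReduceLROnPlateau(learning_rate=1.0, patience=2)
+        adam = fluid.optimizer.Adam(learning_rate=reduce_lr,
+                                    parameter_list=linear.parameters())
+
+        for epoch in range(5):
+            loss = fluid.layers.reduce_mean(linear(input))
+            loss.backward()
+            adam.minimize(loss)
+            linear.clear_gradients()
+            reduce_lr.step(loss)              # 传入本epoch的loss,必要时衰减学习率
+            print(adam.current_step_lr())     # 查看当前学习率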
diff --git a/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst b/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst
index ed66dcfb5bc6a3957291f63cab26161a85161471..b39b4a556aae0612cff2ea4d33a7fbc0af10107a 100644
--- a/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/Sequential_cn.rst
@@ -5,6 +5,9 @@ Sequential
.. py:class:: paddle.fluid.dygraph.Sequential(*layers)
+
+
+
顺序容器。子Layer将按构造函数参数的顺序添加到此容器中。传递给构造函数的参数可以Layers或可迭代的name Layer元组。
参数:
diff --git a/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst b/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst
index 471ccd5536184d3b94757a9a03b9c1576572ed1c..d1677c60a870a6214860b5bb49418b35805d5bd9 100644
--- a/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/SpectralNorm_cn.rst
@@ -5,6 +5,12 @@ SpectralNorm
.. py:class:: paddle.fluid.dygraph.SpectralNorm(weight_shape, dim=0, power_iters=1, eps=1e-12, name=None, dtype="float32")
+:alias_main: paddle.nn.SpectralNorm
+:alias: paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm
+:old_api: paddle.fluid.dygraph.SpectralNorm
+
+
+
该接口用于构建 ``SpectralNorm`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其中实现了谱归一化层的功能,用于计算fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量维度相同。谱特征值计算方式如下:
步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。
diff --git a/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst b/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0016cf85752bff268a481a389f37f69e964414b6
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/StepDecay_cn.rst
@@ -0,0 +1,73 @@
+.. _cn_api_fluid_dygraph_StepDecay:
+
+StepDecay
+-------------------------------
+
+
+.. py:class:: paddle.fluid.dygraph.StepDecay(learning_rate, step_size, decay_rate=0.1)
+
+:api_attr: 命令式编程模式(动态图)
+
+
+该接口提供以 ``step_size`` 为间隔衰减学习率的功能,每经过 ``step_size`` 个 ``epoch`` 会通过 ``decay_rate`` 衰减一次学习率。
+
+算法可以描述为:
+
+.. code-block:: text
+
+ learning_rate = 0.5
+ step_size = 30
+ decay_rate = 0.1
+ learning_rate = 0.5 if epoch < 30
+ learning_rate = 0.05 if 30 <= epoch < 60
+ learning_rate = 0.005 if 60 <= epoch < 90
+ ...
+
+参数:
+ - **learning_rate** (float|int) - 初始化的学习率。可以是Python的float或int。
+ - **step_size** (int) - 学习率每衰减一次的间隔。
+ - **decay_rate** (float,可选) - 学习率的衰减率。 ``new_lr = origin_lr * decay_rate`` 。其值应该小于1.0。默认:0.1。
+
+返回: 无
+
+**代码示例**:
+
+ .. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ with fluid.dygraph.guard():
+ x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+ linear = fluid.dygraph.Linear(10, 10)
+ input = fluid.dygraph.to_variable(x)
+ scheduler = fluid.dygraph.StepDecay(0.5, step_size=3)
+ adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
+ for epoch in range(9):
+ for batch_id in range(5):
+ out = linear(input)
+ loss = fluid.layers.reduce_mean(out)
+ adam.minimize(loss)
+ scheduler.epoch()
+ print("epoch:{}, current lr is {}" .format(epoch, adam.current_step_lr()))
+ # epoch:0, current lr is 0.5
+ # epoch:1, current lr is 0.5
+ # epoch:2, current lr is 0.5
+ # epoch:3, current lr is 0.05
+ # epoch:4, current lr is 0.05
+ # epoch:5, current lr is 0.05
+ # epoch:6, current lr is 0.005
+ # epoch:7, current lr is 0.005
+ # epoch:8, current lr is 0.005
+
+.. py:method:: epoch(epoch=None)
+
+通过当前的 epoch 调整学习率,调整后的学习率将会在下一次调用 ``optimizer.minimize`` 时生效。
+
+参数:
+ - **epoch** (int|float,可选) - 指定当前的epoch数。默认值为None,此时将会自动累计epoch数。
+
+返回:
+ 无
+
+**代码示例**:
+
+ 参照上述示例代码。
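+
+ 此外,下面给出一个显式传入epoch数的简化示意(预期的学习率数值依据上文的衰减规则推算):
+
+ .. code-block:: python
+
+    # 简化示意:显式传入epoch数来调整学习率
+    import numpy as np
+    import paddle.fluid as fluid
+
+    with fluid.dygraph.guard():
+        linear = fluid.dygraph.Linear(10, 10)
+        input = fluid.dygraph.to_variable(
+            np.random.uniform(-1, 1, [10, 10]).astype("float32"))
+        scheduler = fluid.dygraph.StepDecay(0.5, step_size=3)
+        adam = fluid.optimizer.Adam(learning_rate=scheduler,
+                                    parameter_list=linear.parameters())
+
+        scheduler.epoch(6)                    # 显式指定当前epoch为6
+        loss = fluid.layers.reduce_mean(linear(input))
+        loss.backward()
+        adam.minimize(loss)                   # 调整后的学习率在本次minimize时生效
+        linear.clear_gradients()
+        print(adam.current_step_lr())         # 预期为0.005,即 0.5 * 0.1**(6 // 3)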
diff --git a/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst b/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst
index cb014477f5bbc3fac89e774a391214d45ae3434a..342cc84f4c1a0992e5b1628956c9a4534e368770 100644
--- a/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/TracedLayer_cn.rst
@@ -3,10 +3,13 @@
TracedLayer
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:class:: paddle.fluid.dygraph.TracedLayer(program, parameters, feed_names, fetch_names)
+:api_attr: 命令式编程模式(动态图)
+
+
+
TracedLayer用于将前向动态图模型转换为静态图模型,主要用于将动态图保存后做在线C++预测。除此以外,用户也可使用转换后的静态图模型在Python端做预测,通常比原先的动态图性能更好。
TracedLayer使用 ``Executor`` 和 ``CompiledProgram`` 运行静态图模型。转换后的静态图模型与原动态图模型共享参数。
diff --git a/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst b/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0924a155a0ffb8861dd0f233cf0b0eff3dd8b169
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/TranslatedLayer_cn.rst
@@ -0,0 +1,84 @@
+.. _cn_api_fluid_dygraph_TranslatedLayer:
+
+TranslatedLayer
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.TranslatedLayer(programs, persistable_vars)
+
+``TranslatedLayer`` 是一个命令式编程模式 :ref:`cn_api_fluid_dygraph_Layer` 的继承类,
+通过 :ref:`cn_api_fluid_dygraph_jit_load` 载入构建。能够像一般 ``Layer`` 一样在train或者eval模式下使用。
+
+.. note::
+ ``TranslatedLayer`` 对象不能够通过构造函数创建,仅能够通过 :ref:`cn_api_fluid_dygraph_jit_load` 接口载入构建。
+
+**示例代码:**
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ BATCH_SIZE = 32
+ BATCH_NUM = 20
+ def random_batch_reader():
+ def _get_random_images_and_labels(image_shape, label_shape):
+ image = np.random.random(size=image_shape).astype('float32')
+ label = np.random.random(size=label_shape).astype('int64')
+ return image, label
+ def __reader__():
+ for _ in range(BATCH_NUM):
+ batch_image, batch_label = _get_random_images_and_labels(
+ [BATCH_SIZE, 784], [BATCH_SIZE, 1])
+ yield batch_image, batch_label
+ return __reader__
+ class LinearNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(LinearNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ return self._linear(x)
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 1. 训练存储模型.
+ # 创建网络
+ net = LinearNet(784, 1)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ # 创建DataLoader
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(random_batch_reader())
+ # 训练
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = net(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ adam.minimize(avg_loss)
+ net.clear_gradients()
+ model_path = "linear.example.model"
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[img])
+ # 2. 载入模型构建TranslatedLayer
+ translated_layer = fluid.dygraph.jit.load(model_path)
+ # 预测
+ translated_layer.eval()
+ x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
+ pred = translated_layer(x)
+ # fine-tune训练
+ translated_layer.train()
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=translated_layer.parameters())
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(random_batch_reader())
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = translated_layer(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ adam.minimize(avg_loss)
+ translated_layer.clear_gradients()
diff --git a/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst b/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst
index 23033ddb57b926eac22edc8d135181fdea9222f5..699a3f71ec5feb18f3da4d86f0c8df7566cb5c82 100644
--- a/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/TreeConv_cn.rst
@@ -5,6 +5,9 @@ TreeConv
.. py:class:: paddle.fluid.dygraph.TreeConv(feature_size, output_size, num_filters=1, max_depth=2, act='tanh', param_attr=None, bias_attr=None, name=None, dtype="float32")
+
+
+
该接口用于构建 ``TreeConv`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个基于树结构的卷积(Tree-Based Convolution)运算。基于树的卷积是基于树的卷积神经网络(TBCNN,Tree-Based Convolution Neural Network)的一部分,它用于对树结构进行分类,例如抽象语法树。 Tree-Based Convolution提出了一种称为连续二叉树的数据结构,它将多路(multiway)树视为二叉树。详情请参考: `基于树的卷积论文 `_ 。
diff --git a/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst b/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9920b5b7af2d6913189ac6d0255cea41995e524d
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/declarative_cn.rst
@@ -0,0 +1,32 @@
+.. _cn_api_fluid_dygraph_declarative:
+
+declarative
+-------------------------------
+
+.. py:decorator:: paddle.fluid.dygraph.jit.declarative
+
+本装饰器将函数内的动态图API转化为静态图API。此装饰器自动处理静态图模式下的Program和Executor,并将结果作为动态图Tensor返回。输出的动态图Tensor可以继续进行动态图训练、预测或其他运算。如果被装饰的函数里面调用其他动态图函数,被调用的函数也会被转化为静态图函数。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ from paddle.fluid.dygraph.jit import declarative
+
+ fluid.enable_dygraph()
+
+ @declarative
+ def func(x):
+ x = fluid.dygraph.to_variable(x)
+ if fluid.layers.mean(x) < 0:
+ x_v = x - 1
+ else:
+ x_v = x + 1
+ return x_v
+
+ x = np.ones([1, 2])
+ x_v = func(x)
+ print(x_v.numpy()) # [[2. 2.]]
+
diff --git a/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst b/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e5716e76456a99ba5724369d4c2aaba7bfa129f8
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/enabled_cn.rst
@@ -0,0 +1,25 @@
+.. _cn_api_fluid_dygraph_enabled:
+
+enabled
+-------------------------------
+
+.. py:method:: paddle.fluid.dygraph.enabled()
+
+这个函数用于检查程序是否运行在动态图模式。你可以使用 :ref:`cn_api_fluid_dygraph_guard` api进入动态图模式。或者使用 :ref:`cn_api_fluid_enable_dygraph` 和 :ref:`cn_api_fluid_disable_dygraph` api打开、关闭动态图模式。
+
+注意: `fluid.dygraph.enabled` 实际上调用了 :ref:`cn_api_fluid_in_dygraph_mode` api,所以推荐使用 :ref:`cn_api_fluid_in_dygraph_mode` api。
+
+返回: 程序是否运行在动态图模式。
+
+返回类型: bool
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ fluid.enable_dygraph() # Now we are in dygraph mode
+ print(fluid.dygraph.enabled()) # True
+ fluid.disable_dygraph()
+ print(fluid.dygraph.enabled()) # False
diff --git a/doc/fluid/api_cn/dygraph_cn/grad_cn.rst b/doc/fluid/api_cn/dygraph_cn/grad_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1b164d104b6a6432b11b641431165186700b9381
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/grad_cn.rst
@@ -0,0 +1,102 @@
+.. _cn_api_paddle_grad:
+
+grad
+-------------------------------
+
+**注意:该API仅支持【动态图】模式**
+
+.. py:method:: paddle.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False, no_grad_vars=None)
+
+对于每个 `inputs` ,计算所有 `outputs` 相对于其的梯度和。
+
+参数:
+ - **outputs** (Tensor|list(Tensor)|tuple(Tensor)) – 用于计算梯度的图的输出变量,或多个输出变量构成的list/tuple。
+ - **inputs** (Tensor|list(Tensor)|tuple(Tensor)) - 用于计算梯度的图的输入变量,或多个输入变量构成的list/tuple。该API的每个返回值对应每个 `inputs` 的梯度。
+ - **grad_outputs** (Tensor|list(Tensor|None)|tuple(Tensor|None), 可选) - `outputs` 变量梯度的初始值。若 `grad_outputs` 为None,则 `outputs` 梯度的初始值均为全1的Tensor。若 `grad_outputs` 不为None,它必须与 `outputs` 的长度相等,此时,若 `grad_outputs` 的第i个元素为None,则第i个 `outputs` 的梯度初始值为全1的Tensor;若 `grad_outputs` 的第i个元素为Tensor,则第i个 `outputs` 的梯度初始值为 `grad_outputs` 的第i个元素。默认值为None。
+ - **retain_graph** (bool, 可选) - 是否保留计算梯度的前向图。若值为True,则前向图会保留,用户可对同一张图求两次反向。若值为False,则前向图会释放。默认值为None,表示值与 `create_graph` 相等。
+ - **create_graph** (bool, 可选) - 是否创建计算过程中的反向图。若值为True,则可支持计算高阶导数。若值为False,则计算过程中的反向图会释放。默认值为False。
+ - **only_inputs** (bool, 可选) - 是否只计算 `inputs` 的梯度。若值为False,则图中所有叶节点变量的梯度均会计算,并进行累加。若值为True,则只会计算 `inputs` 的梯度。默认值为True。only_inputs=False功能正在开发中,目前尚不支持。
+ - **allow_unused** (bool, 可选) - 决定当某些 `inputs` 变量不在计算图中时抛出错误还是返回None。若某些 `inputs` 变量不在计算图中(即它们的梯度为None),则当allowed_unused=False时会抛出错误,当allow_unused=True时会返回None作为这些变量的梯度。默认值为False。
+ - **no_grad_vars** (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), 可选) - 指明不需要计算梯度的变量。默认值为None。
+
+返回: tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第i个返回的变量是所有 `outputs` 相对于第i个 `inputs` 的梯度之和。
+
+**示例代码 1**
+ .. code-block:: python
+
+ import paddle
+ paddle.disable_static()
+
+ def test_dygraph_grad(create_graph):
+ x = paddle.ones(shape=[1], dtype='float32')
+ x.stop_gradient = False
+ y = x * x
+
+ # Since y = x * x, dx = 2 * x
+ dx = paddle.grad(
+ outputs=[y],
+ inputs=[x],
+ create_graph=create_graph,
+ retain_graph=True)[0]
+
+ z = y + dx
+
+ # If create_graph = False, the gradient of dx
+ # would not be backpropagated. Therefore,
+ # z = x * x + dx, and x.gradient() = 2 * x = 2.0
+
+ # If create_graph = True, the gradient of dx
+ # would be backpropagated. Therefore,
+ # z = x * x + dx = x * x + 2 * x, and
+ # x.gradient() = 2 * x + 2 = 4.0
+
+ z.backward()
+ return x.gradient()
+
+ print(test_dygraph_grad(create_graph=False)) # [2.]
+ print(test_dygraph_grad(create_graph=True)) # [4.]
+
+**示例代码 2**
+ .. code-block:: python
+
+ import paddle
+ paddle.disable_static()
+
+ def test_dygraph_grad(grad_outputs=None):
+ x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
+ x.stop_gradient = False
+
+ y1 = x * x
+ y2 = x * 3
+
+ # If grad_outputs=None, dy1 = [1], dy2 = [1].
+ # If grad_outputs=[g1, g2], then:
+ # - dy1 = [1] if g1 is None else g1
+ # - dy2 = [1] if g2 is None else g2
+
+ # Since y1 = x * x, dx = 2 * x * dy1.
+ # Since y2 = x * 3, dx = 3 * dy2.
+ # Therefore, the final result would be:
+ # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2.
+
+ dx = paddle.grad(
+ outputs=[y1, y2],
+ inputs=[x],
+ grad_outputs=grad_outputs)[0]
+
+ return dx.numpy()
+
+ grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32')
+
+ # dy1 = [1], dy2 = [1]
+ print(test_dygraph_grad(None)) # [7.]
+
+ # dy1 = [1], dy2 = [4]
+ print(test_dygraph_grad([None, grad_value])) # [16.]
+
+ # dy1 = [4], dy2 = [1]
+ print(test_dygraph_grad([grad_value, None])) # [19.]
+
+ # dy1 = [3], dy2 = [4]
+ grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
+ print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
\ No newline at end of file
diff --git a/doc/fluid/api_cn/dygraph_cn/guard_cn.rst b/doc/fluid/api_cn/dygraph_cn/guard_cn.rst
index b8dd4156daa5806b5be2678f884b16f3c599dd8f..651e8b6b5998545b5b8cf6553bc39c5b59495c25 100644
--- a/doc/fluid/api_cn/dygraph_cn/guard_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/guard_cn.rst
@@ -3,10 +3,13 @@
guard
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:function:: paddle.fluid.dygraph.guard(place=None)
+:api_attr: 命令式编程模式(动态图)
+
+
+
通过with语句创建一个dygraph运行的context,执行context代码。
参数:
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6899679ce577b0ae25304c638fa9a618b8e060cd
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn.rst
@@ -0,0 +1,12 @@
+===
+jit
+===
+
+.. toctree::
+ :maxdepth: 1
+
+ jit_cn/save_cn.rst
+ jit_cn/set_code_level_cn.rst
+ jit_cn/set_verbosity_cn.rst
+ jit_cn/load_cn.rst
+ jit_cn/SaveLoadConfig_cn.rst
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cbee1bab234be6f53f83061c52139093513d321b
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/SaveLoadConfig_cn.rst
@@ -0,0 +1,273 @@
+.. _cn_api_fluid_dygraph_jit_SaveLoadConfig:
+
+SaveLoadConfig
+-------------------------------
+
+.. py:class:: paddle.fluid.dygraph.jit.SaveLoadConfig()
+
+用于配置接口 :ref:`cn_api_fluid_dygraph_jit_save` 和 :ref:`cn_api_fluid_dygraph_jit_load` 存储载入 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 时的附加选项。
+
+**示例代码:**
+
+ 1. 在存储模型时使用 ``SaveLoadConfig``
+
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ return z
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 训练模型
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ out = net(x)
+ loss = fluid.layers.mean(out)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ # 在存储模型时使用SaveLoadConfig
+ model_path = "simplenet.example.model"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ configs.model_filename = "__simplenet__"
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[x],
+ configs=configs)
+
+ 2. 在载入模型时使用 ``SaveLoadConfig``
+
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 在载入模型时使用SaveLoadconfig
+ model_path = "simplenet.example.model"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ configs.model_filename = "__simplenet__"
+ infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+ # 预测
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ pred = infer_net(x)
+
+属性
+::::::::::::
+
+.. py:attribute:: output_spec
+
+选择保存模型( :ref:`cn_api_fluid_dygraph_TranslatedLayer` )的输出变量,通过指定的这些变量能够使模型仅计算特定的结果。
+默认情况下,原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量都将配置为存储后模型 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 的输出变量。
+
+``output_spec`` 属性类型需要是 ``list[Variable]``。如果输入的 ``output_spec`` 列表不是原始 :ref:`cn_api_fluid_dygraph_Layer` 的forward方法的所有返回变量,
+将会依据输入的 ``output_spec`` 列表对存储的模型进行裁剪。
+
+.. note::
+ ``output_spec`` 属性仅在存储模型时使用。
+
+**示例代码:**
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ loss = fluid.layers.mean(z)
+ return z, loss
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 训练模型
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ out, loss = net(x)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ # 使用SaveLoadconfig.output_spec
+ model_path = "simplenet.example.model.output_spec"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ # 仅在存储模型中保留预测结果,丢弃loss
+ configs.output_spec = [out]
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[x],
+ configs=configs)
+ infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ # 仅有预测结果输出
+ pred = infer_net(x)
+
+
+.. py:attribute:: model_filename
+
+存储转写 :ref:`cn_api_fluid_dygraph_Layer` 模型结构 ``Program`` 的文件名称。默认文件名为 ``__model__``。
+
+**示例代码**
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ return z
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 训练模型
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ out = net(x)
+ loss = fluid.layers.mean(out)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ model_path = "simplenet.example.model.model_filename"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ configs.model_filename = "__simplenet__"
+ # 配置configs.model_filename存储模型
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[x],
+ configs=configs)
+ # [结果] 存储模型目录文件包括:
+ # __simplenet__ __variables__ __variables.info__
+ # 配置configs.model_filename载入模型
+ infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ pred = infer_net(x)
+
+
+.. py:attribute:: params_filename
+
+存储转写 :ref:`cn_api_fluid_dygraph_Layer` 所有持久参数(包括 ``Parameters`` 和持久的 ``Buffers``)的文件名称。默认文件名称为 ``__variables__``。
+
+**示例代码**
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ return z
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 训练模型
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ out = net(x)
+ loss = fluid.layers.mean(out)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ model_path = "simplenet.example.model.params_filename"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ configs.params_filename = "__params__"
+ # 配置configs.params_filename存储模型
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[x],
+ configs=configs)
+ # [结果] 存储模型目录文件包括:
+ # __model__ __params__ __variables.info__
+ # 配置configs.params_filename载入模型
+ infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ pred = infer_net(x)
+
+
+.. py:attribute:: separate_params
+
+配置是否将 :ref:`cn_api_fluid_dygraph_Layer` 的参数存储为分散的文件。
+(这是为了兼容接口 :ref:`cn_api_fluid_io_save_inference_model` 的行为)
+
+如果设置为 ``True`` ,每个参数将会被存储为一个文件,文件名为参数名,同时 ``SaveLoadConfig.params_filename`` 指定的文件名将不会生效。默认为 ``False``。
+
+**示例代码**
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ class SimpleNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ return z
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 训练模型
+ net = SimpleNet(8, 8)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ for i in range(10):
+ out = net(x)
+ loss = fluid.layers.mean(out)
+ loss.backward()
+ adam.minimize(loss)
+ net.clear_gradients()
+ model_path = "simplenet.example.model.separate_params"
+ configs = fluid.dygraph.jit.SaveLoadConfig()
+ configs.separate_params = True
+ # 配置configs.separate_params存储模型
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[x],
+ configs=configs)
+ # [结果] 存储模型目录文件包括:
+ # linear_0.b_0 linear_0.w_0 __model__ __variables.info__
+ # 配置configs.separate_params载入模型
+ infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
+ x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
+ pred = infer_net(x)
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f001976971c5e84eb93c62fa5a5d77c8f2a9a335
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/load_cn.rst
@@ -0,0 +1,168 @@
+.. _cn_api_fluid_dygraph_jit_load:
+
+load
+-----------------
+
+.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None)
+
+:api_attr: 命令式编程模式(动态图)
+
+将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。
+
+.. note::
+ 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的,
+ 在使用它进行fine-tune训练时会存在一些局限:
+ 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用;
+ 2. 所有存储模型的feed变量都需要被传入 ``TranslatedLayer`` 的forward方法;
+ 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复;
+ 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。
+
+参数:
+ - **model_path** (str) - 存储模型的目录。
+ - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
+
+返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。
+
+**示例代码**
+
+1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。
+
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ BATCH_SIZE = 32
+ BATCH_NUM = 20
+ def random_batch_reader():
+ def _get_random_images_and_labels(image_shape, label_shape):
+ image = np.random.random(size=image_shape).astype('float32')
+ label = np.random.random(size=label_shape).astype('int64')
+ return image, label
+ def __reader__():
+ for _ in range(BATCH_NUM):
+ batch_image, batch_label = _get_random_images_and_labels(
+ [BATCH_SIZE, 784], [BATCH_SIZE, 1])
+ yield batch_image, batch_label
+ return __reader__
+ class LinearNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(LinearNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ return self._linear(x)
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 1. 训练存储模型.
+ # 创建网络
+ net = LinearNet(784, 1)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ # 创建DataLoader
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(random_batch_reader())
+ # 训练
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = net(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ adam.minimize(avg_loss)
+ net.clear_gradients()
+ model_path = "linear.example.model"
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[img])
+ # 2. 载入模型 & 预测
+ # 载入模型
+ infer_net = fluid.dygraph.jit.load(model_path)
+ # 预测
+ x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
+ pred = infer_net(x)
+ # 3. 载入模型 & fine-tune训练
+ # 载入模型
+ train_net = fluid.dygraph.jit.load(model_path)
+ train_net.train()
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters())
+ # 创建DataLoader
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(random_batch_reader())
+ # fine-tune训练
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = train_net(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ adam.minimize(avg_loss)
+ train_net.clear_gradients()
+
+
+2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。
+
+ .. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ BATCH_SIZE = 32
+ BATCH_NUM = 20
+ def random_batch_reader():
+ def _get_random_images_and_labels(image_shape, label_shape):
+ image = np.random.random(size=image_shape).astype('float32')
+ label = np.random.random(size=label_shape).astype('int64')
+ return image, label
+ def __reader__():
+ for _ in range(BATCH_NUM):
+ batch_image, batch_label = _get_random_images_and_labels(
+ [BATCH_SIZE, 784], [BATCH_SIZE, 1])
+ yield batch_image, batch_label
+ return __reader__
+ img = fluid.data(name='img', shape=[None, 784], dtype='float32')
+ label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+ pred = fluid.layers.fc(input=img, size=10, act='softmax')
+ loss = fluid.layers.cross_entropy(input=pred, label=label)
+ avg_loss = fluid.layers.mean(loss)
+ optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+ optimizer.minimize(avg_loss)
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ exe.run(fluid.default_startup_program())
+ loader = fluid.io.DataLoader.from_generator(
+ feed_list=[img, label], capacity=5, iterable=True)
+ loader.set_batch_generator(random_batch_reader(), places=place)
+ # 1. 训练 & 存储预测模型
+ for data in loader():
+ exe.run(
+ fluid.default_main_program(),
+ feed=data,
+ fetch_list=[avg_loss])
+ model_path = "fc.example.model"
+ fluid.io.save_inference_model(
+ model_path, ["img"], [pred], exe)
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 2. 载入模型 & 预测
+ fc = fluid.dygraph.jit.load(model_path)
+ x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
+ pred = fc(x)
+ # 3. 载入模型 & fine-tune训练
+ fc = fluid.dygraph.jit.load(model_path)
+ fc.train()
+ sgd = fluid.optimizer.SGD(learning_rate=0.001,
+ parameter_list=fc.parameters())
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(
+ random_batch_reader(), places=place)
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = fc(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ sgd.minimize(avg_loss)
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f0276316bacd0d7b7cb7ef6df12b1f9ac08b759f
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/save_cn.rst
@@ -0,0 +1,80 @@
+.. _cn_api_fluid_dygraph_jit_save:
+
+save
+-----------------
+
+.. py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None)
+
+将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型,
+载入后可用于预测推理或者fine-tune训练。
+
+该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。
+
+默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``,
+同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。
+
+存储的模型能够被以下API载入使用:
+ - :ref:`cn_api_fluid_dygraph_jit_load`
+ - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ,用法示意见下文示例末尾)
+ - 其他预测库API
+
+参数:
+ - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。
+ - **model_path** (str) - 存储模型的目录。
+ - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。
+ - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
+
+返回:无
+
+**示例代码**
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.fluid.dygraph import Linear
+ from paddle.fluid.dygraph import declarative
+ BATCH_SIZE = 32
+ BATCH_NUM = 20
+ def random_batch_reader():
+ def _get_random_images_and_labels(image_shape, label_shape):
+ image = np.random.random(size=image_shape).astype('float32')
+ label = np.random.random(size=label_shape).astype('int64')
+ return image, label
+ def __reader__():
+ for _ in range(BATCH_NUM):
+ batch_image, batch_label = _get_random_images_and_labels(
+ [BATCH_SIZE, 784], [BATCH_SIZE, 1])
+ yield batch_image, batch_label
+ return __reader__
+ class LinearNet(fluid.dygraph.Layer):
+ def __init__(self, in_size, out_size):
+ super(LinearNet, self).__init__()
+ self._linear = Linear(in_size, out_size)
+ @declarative
+ def forward(self, x):
+ return self._linear(x)
+ # 开启命令式编程模式
+ fluid.enable_dygraph()
+ # 创建网络
+ net = LinearNet(784, 1)
+ adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
+ # 创建DataLoader
+ train_loader = fluid.io.DataLoader.from_generator(capacity=5)
+ train_loader.set_batch_generator(random_batch_reader())
+ # 训练
+ for data in train_loader():
+ img, label = data
+ label.stop_gradient = True
+ cost = net(img)
+ loss = fluid.layers.cross_entropy(cost, label)
+ avg_loss = fluid.layers.mean(loss)
+ avg_loss.backward()
+ adam.minimize(avg_loss)
+ net.clear_gradients()
+ # 存储模型
+ model_path = "linear.example.model"
+ fluid.dygraph.jit.save(
+ layer=net,
+ model_path=model_path,
+ input_spec=[img])
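+
+上面保存的模型也可以通过 :ref:`cn_api_fluid_io_load_inference_model` 载入,下面给出一个简要示意(承接上例;注意需要配置 ``params_filename='__variables__'`` ):
+
+.. code-block:: python
+
+    # 示意:切回声明式编程(静态图)模式,用load_inference_model载入上面保存的模型
+    # (承接上例:model_path、np、fluid 等均已定义)
+    fluid.disable_dygraph()
+    exe = fluid.Executor(fluid.CPUPlace())
+    [inference_program, feed_target_names, fetch_targets] = (
+        fluid.io.load_inference_model(dirname=model_path,
+                                      executor=exe,
+                                      params_filename='__variables__'))
+    test_img = np.random.random((1, 784)).astype('float32')
+    results = exe.run(inference_program,
+                      feed={feed_target_names[0]: test_img},
+                      fetch_list=fetch_targets)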
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..73b0830cdb7655a5b5246cdd95831264ef77d847
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_code_level_cn.rst
@@ -0,0 +1,34 @@
+.. _cn_api_fluid_dygraph_jit_set_code_level:
+
+set_code_level
+-----------------
+
+.. py:function:: paddle.fluid.dygraph.jit.set_code_level(level=100)
+
+设置代码级别,打印该级别 AST Transformer 转化后的代码。
+
+有两种方法设置代码级别:
+
+1. 调用函数 ``set_code_level``
+2. 设置环境变量 ``TRANSLATOR_CODE_LEVEL``
+
+.. note::
+ 函数 ``set_code_level`` 的优先级高于环境变量 ``TRANSLATOR_CODE_LEVEL``。
+
+
+参数:
+ - **level** (int) - 打印的代码级别。默认值为100,这意味着打印的是所有 AST Transformer 转化后的代码。
+
+**示例代码**
+
+.. code-block:: python
+
+ import os
+ import paddle
+
+ paddle.jit.set_code_level(2)
+ # It will print the transformed code at level 2, i.e. the code after the second transformer.
+ # As of August 28, 2020, the second transformer is CastTransformer.
+
+ os.environ['TRANSLATOR_CODE_LEVEL'] = '3'
+ # The code level is now 3, but it has no effect because it has a lower priority than `set_code_level`
diff --git a/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c185e94e5d28dc8ada4c7a8af854c83bbcc557bf
--- /dev/null
+++ b/doc/fluid/api_cn/dygraph_cn/jit_cn/set_verbosity_cn.rst
@@ -0,0 +1,35 @@
+.. _cn_api_fluid_dygraph_jit_set_verbosity:
+
+set_verbosity
+-----------------
+
+.. py:function:: paddle.fluid.dygraph.jit.set_verbosity(level=0)
+
+设置动态图转静态图的日志详细级别。
+
+有两种方法设置日志详细级别:
+
+1. 调用函数 ``set_verbosity``
+2. 设置环境变量 ``TRANSLATOR_VERBOSITY``
+
+.. note::
+ 函数 ``set_verbosity`` 的优先级高于环境变量 ``TRANSLATOR_VERBOSITY``。
+
+
+参数:
+ - **level** (int) - 日志详细级别。值越大,表示越详细。默认值为0,表示不显示日志。
+
+**示例代码**
+
+.. code-block:: python
+
+ import os
+ import paddle
+
+ paddle.jit.set_verbosity(1)
+ # The verbosity level is now 1
+
+ os.environ['TRANSLATOR_VERBOSITY'] = '3'
+ # The verbosity level is now 3, but it has no effect because it has a lower priority than `set_verbosity`
+
+
diff --git a/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
index 3219e2d0490d0f7714d6fb7d2cda1103aa0f6b9c..39b18d7830eaafa66c6a99a770cf8e85b8fc32b5 100644
--- a/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/load_dygraph_cn.rst
@@ -3,10 +3,13 @@
load_dygraph
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:function:: paddle.fluid.dygraph.load_dygraph(model_path)
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口尝试从磁盘中加载参数或优化器的 ``dict`` 。
该接口会同时加载 ``model_path + ".pdparams"`` 和 ``model_path + ".pdopt"`` 中的内容。
@@ -32,7 +35,8 @@ load_dygraph
emb = fluid.dygraph.Embedding([10, 10])
state_dict = emb.state_dict()
fluid.save_dygraph( state_dict, "paddle_dy")
- adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000) )
+ adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000) ,
+ parameter_list = emb.parameters() )
state_dict = adam.state_dict()
fluid.save_dygraph( state_dict, "paddle_dy")
diff --git a/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst b/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
index 32083c7dc686308db11ff68a9417d05880b99461..9c84e82da4a895d898ea34154d71a19190f744eb 100644
--- a/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/no_grad_cn.rst
@@ -3,31 +3,50 @@
no_grad
-------------------------------
-**注意:该API仅支持【动态图】模式**
-.. py:method:: paddle.fluid.dygraph.no_grad(func)
+.. py:class:: paddle.fluid.dygraph.no_grad
-在动态图模式中,此装饰器将会避免 ``func`` 被装饰时创建反向传播网络。
+:api_attr: 命令式编程模式(动态图)
+:old_api: paddle.fluid.dygraph.no_grad
-参数:
- - **func** (str) – 不需要梯度的函数。
+
+创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有stop_gradient=True。
+
+也可以用作一个装饰器(需要创建实例对象作为装饰器)。
**代码示例**
.. code-block:: python
-
import numpy as np
import paddle.fluid as fluid
- @fluid.dygraph.no_grad
+ fluid.enable_dygraph()
+
+ # 用作上下文管理器
+
+ data = np.array([[2, 3], [4, 5]]).astype('float32')
+ l0 = fluid.Linear(2, 2) # l0.weight.gradient() is None
+ l1 = fluid.Linear(2, 2)
+ with fluid.no_grad():
+ # l1.weight.stop_gradient is False
+ tmp = l1.weight * 2 # tmp.stop_gradient is True
+ x = fluid.dygraph.to_variable(data)
+ y = l0(x) + tmp
+ o = l1(y)
+ o.backward()
+ print(tmp.gradient() is None) # True
+ print(l0.weight.gradient() is None) # False
+
+ # 用作装饰器
+
+ @fluid.no_grad()
def test_layer():
- with fluid.dygraph.guard():
- inp = np.ones([3, 1024], dtype='float32')
- t = fluid.dygraph.base.to_variable(inp)
- linear1 = fluid.Linear(1024, 4, bias_attr=False)
- linear2 = fluid.Linear(4, 4)
- ret = linear1(t)
- dy_ret = linear2(ret)
+ inp = np.ones([3, 1024], dtype='float32')
+ t = fluid.dygraph.base.to_variable(inp)
+ linear1 = fluid.Linear(1024, 4, bias_attr=False)
+ linear2 = fluid.Linear(4, 4)
+ ret = linear1(t)
+ dy_ret = linear2(ret)
test_layer()
diff --git a/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst b/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
index 295cc0bb89fcb932ebb571411f41719c4adce91d..cd18f8d75081a1fbb868e6b92dd890465bbccb29 100644
--- a/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/prepare_context_cn.rst
@@ -5,6 +5,10 @@ prepare_context
.. py:class:: paddle.fluid.dygraph.prepare_context(strategy=None)
+:api_attr: 命令式编程模式(动态图)
+
+
+
该API是进行多进程多卡训练的环境配置接口,接受一个ParallelStrategy结构体变量作为输入。当strategy属性中的nums_trainer小于2时,API会直接返回,当nums_trainer大于1且为CUDAPlace时,由于目前动态图模式仅支持GPU多卡训练,仅能配置NCCL多卡训练的环境,所以此时会对NCCL环境进行配置,具体内容包括:生成NCCL ID,并广播至参与训练的各进程,用于支持的处理器同步操作,创建并配置NCCL通信器等。
参数:
diff --git a/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst b/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst
index cc28502a29d542127f03d23be3ee2cf8ad9fd6e0..e830d9c8d071908716d1439efbed07cca13a456e 100644
--- a/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/save_dygraph_cn.rst
@@ -3,10 +3,13 @@
save_dygraph
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:function:: paddle.fluid.dygraph.save_dygraph(state_dict, model_path)
+:api_attr: 命令式编程模式(动态图)
+
+
+
该接口将传入的参数或优化器的 ``dict`` 保存到磁盘上。
``state_dict`` 是通过 :ref:`cn_api_fluid_dygraph_Layer` 的 ``state_dict()`` 方法得到的。
@@ -29,15 +32,13 @@ save_dygraph
import paddle.fluid as fluid
with fluid.dygraph.guard():
- emb = fluid.dygraph.Embedding(
- size=[10, 32],
- param_attr='emb.w',
- is_sparse=False)
+ emb = fluid.dygraph.Embedding([10, 10])
+
state_dict = emb.state_dict()
- fluid.save_dygraph(state_dict, "paddle_dy") # 会保存为 paddle_dy.pdparams
+ fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdparams
+
+ adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000),
+ parameter_list = emb.parameters() )
- adam = fluid.optimizer.Adam(
- learning_rate=fluid.layers.noam_decay(100, 10000),
- parameter_list = emb.parameters())
state_dict = adam.state_dict()
- fluid.save_dygraph(state_dict, "paddle_dy") # 会保存为 paddle_dy.pdopt
+ fluid.save_dygraph( state_dict, "paddle_dy") # 会保存为 paddle_dy.pdopt
\ No newline at end of file
diff --git a/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst b/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst
index 60bb44397abd23986fdaa191316c5e2857847ac4..b562d2cafb0b5f90458ed194677ddee783118e1b 100644
--- a/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst
+++ b/doc/fluid/api_cn/dygraph_cn/to_variable_cn.rst
@@ -3,19 +3,26 @@
to_variable
-------------------------------
-**注意:该API仅支持【动态图】模式**
.. py:function:: paddle.fluid.dygraph.to_variable(value, name=None, zero_copy=None)
-该函数实现从numpy\.ndarray对象或者Variable对象创建一个 ``Variable`` 类型的对象。
+
+:api_attr: 命令式编程模式(动态图)
+
+
+
+该函数实现从tuple、list、numpy\.ndarray、Variable、ComplexVariable 对象创建一个 ``Variable`` 类型的对象。
+
参数:
- - **value** (ndarray|Variable) – 需要转换的numpy\.ndarray或Variable对象,维度可以为多维,数据类型为numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}中的一种。
+ - **value** (tuple|list|ndarray|Variable|Tensor|ComplexVariable) – 初始化的数据。可以是tuple、list、numpy\.ndarray、Variable、ComplexVariable。
+ 维度可以为多维,数据类型为numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}中的一种。
- **name** (str, 可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
- **zero_copy** (bool, 可选) – 是否与输入的numpy数组共享内存。此参数仅适用于CPUPlace,当它为None时将设置为True。默认值为None。
+ - **dtype** (str, 可选) - 返回的 ``Variable`` 所需的数据类型。可以是 'bool','float16','float32','float64','int8','int16','int32','int64','uint8'。默认值: None。
-返回:如果 ``value`` 是numpy\.ndarray对象,返回由numpy\.ndarray对象创建的 ``Tensor`` ,其数据类型和维度与 ``value`` 一致;如果 ``value`` 是Variable对象,返回 ``value`` 。
+返回:如果 ``value`` 是tuple/list/numpy\.ndarray对象,返回对应numpy\.ndarray对象创建的 ``Tensor`` ;如果 ``value`` 是Variable对象,直接返回 ``value`` 。
返回类型:Variable
@@ -25,13 +32,25 @@ to_variable
import numpy as np
import paddle.fluid as fluid
-
with fluid.dygraph.guard(fluid.CPUPlace()):
+
x = np.ones([2, 2], np.float32)
y = fluid.dygraph.to_variable(x, zero_copy=False)
x[0][0] = -1
y[0][0].numpy() # array([1.], dtype=float32)
+
y = fluid.dygraph.to_variable(x)
x[0][0] = 0
y[0][0].numpy() # array([0.], dtype=float32)
+ c = np.array([2+1j, 2])
+ z = fluid.dygraph.to_variable(c)
+ z.numpy() # array([2.+1.j, 2.+0.j])
+ z.dtype # 'complex128'
+
+ y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
+ y.shape # [3L, 2L]
+ y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32')
+ y.shape # [3L, 2L]
+ y.dtype # core.VarDesc.VarType.INT32
+
diff --git a/doc/fluid/api_cn/executor_cn/Executor_cn.rst b/doc/fluid/api_cn/executor_cn/Executor_cn.rst
index f879d28d0aac0a6a14c3cda9494771d15198b5b1..273b6bc79031e78ee56f65b4f7dbf575748d6f6b 100644
--- a/doc/fluid/api_cn/executor_cn/Executor_cn.rst
+++ b/doc/fluid/api_cn/executor_cn/Executor_cn.rst
@@ -3,14 +3,17 @@
Executor
-------------------------------
-**注意:该API仅支持【静态图】模式**
-.. py:class:: paddle.fluid.executor.Executor (place)
+.. py:class:: paddle.fluid.executor.Executor (place=None)
-Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传入设备。
+:api_attr: 声明式编程模式(静态图)
+
+
+
+Executor支持单GPU、多GPU以及CPU运行。
参数:
- - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。
+ - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本来设置默认设备。当PaddlePaddle是CPU版时,默认运行设备将会设置为 `fluid.CPUPlace()` ;当PaddlePaddle是GPU版本时,默认执行设备将会设置为 `fluid.CUDAPlace(0)` 。默认值为None。
返回:初始化后的 ``Executor`` 对象
@@ -25,14 +28,18 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
import numpy
import os
- use_cuda = True
- place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
- exe = fluid.Executor(place)
+ # 显式设置运行设备
+ # use_cuda = True
+ # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ # exe = fluid.Executor(place)
+
+ # 如果不显式设置运行设备,PaddlePaddle会设置默认运行设备
+ exe = fluid.Executor()
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
- data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+ data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
@@ -54,8 +61,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
# 否则fluid会把逻辑核的所有数目设为CPU_NUM,
# 在这种情况下,输入的batch size应大于CPU_NUM,
# 否则程序会异常中断。
- if not use_cuda:
- os.environ['CPU_NUM'] = str(2)
+
+ # 显式设置运行设备
+ # if not use_cuda:
+ # os.environ['CPU_NUM'] = str(2)
+
+ # 未显式设置运行设备且安装的Paddle为CPU版本
+ os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
@@ -83,7 +95,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
exe.close()
-.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True,use_program_cache=False)
+.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, use_prune=False)
执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。
@@ -96,6 +108,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
- **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。
- **return_numpy** (bool) – 该参数表示是否将返回返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。
- **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。
+ - **use_prune** (bool) – 该参数表示是否对输入的Program进行剪枝。如果该参数为True,输入的Program会在run之前根据 ``feed`` 和 ``fetch_list`` 进行剪枝,剪枝的逻辑是将产生 ``feed`` 的 ``Variable`` 和 ``Operator`` 以及不产生 ``fetch_list`` 的 ``Variable`` 和 ``Operator`` 进行裁剪。默认为:False,表示不进行剪枝。请注意,如果将 ``Optimizer.minimize()`` 方法返回的 ``tuple`` 传入 ``fetch_list`` 中,则 ``use_prune`` 会被重写为True,并且会开启剪枝。
返回:返回fetch_list中指定的变量值
@@ -117,7 +130,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
- data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+ data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
@@ -162,8 +175,8 @@ train_from_dataset可以非常容易扩展到大规模分布式在线和离线
place = fluid.CPUPlace() # 通过设置place = fluid.CUDAPlace(0)使用GPU
exe = fluid.Executor(place)
- x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
- y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
+ x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64")
+ y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
@@ -197,12 +210,13 @@ train_from_dataset可以非常容易扩展到大规模分布式在线和离线
import paddle.fluid as fluid
place = fluid.CPUPlace() # 使用GPU时可设置place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
- x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
- y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
+ x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64")
+ y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
- exe.infer_from_dataset(program=fluid.default_main_program(),dataset=dataset)
+ exe.infer_from_dataset(program=fluid.default_main_program(),
+ dataset=dataset)
diff --git a/doc/fluid/api_cn/executor_cn/global_scope_cn.rst b/doc/fluid/api_cn/executor_cn/global_scope_cn.rst
index 1c7aec0a19a37676e77aefebd0781f2649ddf5e6..277bb23db092c0b44704ac5905a968f364d3bc7b 100644
--- a/doc/fluid/api_cn/executor_cn/global_scope_cn.rst
+++ b/doc/fluid/api_cn/executor_cn/global_scope_cn.rst
@@ -3,10 +3,13 @@
global_scope
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.global_scope()
+:api_attr: 声明式编程模式(静态图)
+
+
+
获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。
返回:全局/默认作用域实例
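+
+下面是一个使用默认作用域读写变量的简单示例(仅为示意,其中的变量名 ``data`` 为假设):
+
+.. code-block:: python
+
+    import numpy
+    import paddle.fluid as fluid
+
+    # 向全局/默认作用域中创建变量并写入数据
+    fluid.global_scope().var("data").get_tensor().set(
+        numpy.ones((2, 2), dtype='float32'), fluid.CPUPlace())
+    # 从全局/默认作用域中读取该变量的值
+    data = numpy.array(fluid.global_scope().find_var("data").get_tensor())
+    print(data)  # [[1. 1.] [1. 1.]]
+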
diff --git a/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst b/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst
index ced17a622b58e1179653400e8e690e9f3ffb26d7..e220cd8d451708031d2526534a686a852ba67807 100644
--- a/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst
+++ b/doc/fluid/api_cn/executor_cn/scope_guard_cn.rst
@@ -3,10 +3,13 @@
scope_guard
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.executor.scope_guard (scope)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口通过 python 的 ``with`` 语句切换作用域(scope)。
作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。
diff --git a/doc/fluid/api_cn/fluid_cn.rst b/doc/fluid/api_cn/fluid_cn.rst
old mode 100644
new mode 100755
index 054f39e772568dee67df2cb37acc0e81efc1857a..ae356abbb90744b68bbd3661c13bcc8a606351c6
--- a/doc/fluid/api_cn/fluid_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn.rst
@@ -23,11 +23,15 @@ fluid
fluid_cn/DataFeeder_cn.rst
fluid_cn/default_main_program_cn.rst
fluid_cn/default_startup_program_cn.rst
+ fluid_cn/disable_dygraph_cn.rst
+ fluid_cn/device_guard_cn.rst
fluid_cn/DistributeTranspiler_cn.rst
fluid_cn/DistributeTranspilerConfig_cn.rst
fluid_cn/embedding_cn.rst
+ fluid_cn/enable_dygraph_cn.rst
fluid_cn/ExecutionStrategy_cn.rst
fluid_cn/Executor_cn.rst
+ fluid_cn/get_flags_cn.rst
fluid_cn/global_scope_cn.rst
fluid_cn/gradients_cn.rst
fluid_cn/in_dygraph_mode_cn.rst
@@ -47,6 +51,8 @@ fluid
fluid_cn/require_version_cn.rst
fluid_cn/save_cn.rst
fluid_cn/scope_guard_cn.rst
+ fluid_cn/set_flags_cn.rst
+ fluid_cn/set_global_initializer_cn.rst
fluid_cn/Tensor_cn.rst
fluid_cn/Variable_cn.rst
fluid_cn/WeightNormParamAttr_cn.rst
diff --git a/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst b/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst
index 98cb0e800f79a1c25f3d92248d3a26de2191de8d..2d6f2fa0586898b53490af9f7a5bbb0a481a3aaa 100644
--- a/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/BuildStrategy_cn.rst
@@ -3,11 +3,14 @@
BuildStrategy
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.BuildStrategy
-``BuildStrategy`` 使用户更方便地控制[ ``ParallelExecutor`` ](../fluid_cn.html\#parallelexecutor)中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。
+:api_attr: 声明式编程模式(静态图)
+
+
+
+``BuildStrategy`` 使用户更方便地控制 :ref:`cn_api_fluid_ParallelExecutor` 中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。
**代码示例**
@@ -68,6 +71,7 @@ bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模
**代码示例**
.. code-block:: python
+
import paddle.fluid as fluid
build_strategy = fluid.BuildStrategy()
build_strategy.fuse_broadcast_ops = True
@@ -108,6 +112,7 @@ bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存
import os
import numpy as np
+ import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
use_cuda = True
diff --git a/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst
index e07af202a05fd5bce8716cc61351954a39af005d..e091352c9018b355e234f8407625199d51c48555 100644
--- a/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/CPUPlace_cn.rst
@@ -5,6 +5,9 @@ CPUPlace
.. py:class:: paddle.fluid.CPUPlace
+
+
+
``CPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 ``CPU`` 设备。
**代码示例**
diff --git a/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst
index 59a8f8e74c13916017e5fcda1c804f8a27928f27..a3e669344b2bac46b8cb57d24bbc633bb3549be3 100644
--- a/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/CUDAPinnedPlace_cn.rst
@@ -5,6 +5,9 @@ CUDAPinnedPlace
.. py:class:: paddle.fluid.CUDAPinnedPlace
+
+
+
``CUDAPinnedPlace`` 是一个设备描述符,它所指代的页锁定内存由 CUDA 函数 ``cudaHostAlloc()`` 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝。
有关 CUDA 的数据转移和 ``pinned memory``,参见 `官方文档 `_ 。
diff --git a/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst b/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst
index 0ebbf7f6fe993b38bfe502c1aaef40e496464380..ba7cf62280b52b17dc310c8d9c1a5a4ca2cc6feb 100644
--- a/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/CUDAPlace_cn.rst
@@ -5,6 +5,9 @@ CUDAPlace
.. py:class:: paddle.fluid.CUDAPlace
+
+
+
.. note::
多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。
diff --git a/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst b/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst
index f9ec0995503393891741794489233c88df3f4d24..c6576c63468cac361d12294eb06eb8063d33814f 100644
--- a/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/CompiledProgram_cn.rst
@@ -3,10 +3,13 @@
CompiledProgram
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.CompiledProgram(program_or_graph, build_strategy=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于build_strategy更多信息。请参阅 ``fluid.BuildStrategy`` 。
参数:
@@ -22,34 +25,29 @@ CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进
.. code-block:: python
import paddle.fluid as fluid
- import paddle.fluid.compiler as compiler
import numpy
- import os
-
+
place = fluid.CUDAPlace(0) # fluid.CPUPlace()
exe = fluid.Executor(place)
-
- data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+
+ data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
exe.run(fluid.default_startup_program())
- build_strategy = fluid.BuildStrategy()
- build_strategy.fuse_all_optimizer_ops = True
- compiled_prog = compiler.CompiledProgram(
- fluid.default_main_program(),
- build_strategy=build_strategy)
-
+ compiled_prog = fluid.CompiledProgram(
+ fluid.default_main_program())
+
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(compiled_prog,
- feed={"X": x},
- fetch_list=[loss.name])
+ feed={"X": x},
+ fetch_list=[loss.name])
.. py:method:: with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, share_vars_from=None, places=None)
-该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口是设置build_strategy**。
+该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy**。
参数:
- **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。
@@ -70,45 +68,47 @@ CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进
**代码示例**
.. code-block:: python
-
+
import paddle.fluid as fluid
- import paddle.fluid.compiler as compiler
import numpy
import os
-
+
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ parallel_places = [fluid.CUDAPlace(0), fluid.CUDAPlace(1)] if use_cuda else [fluid.CPUPlace()] * 2
+
# 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM,
# 否则fluid会把逻辑核的所有数目设为CPU_NUM,
# 在这种情况下,输入的batch size应大于CPU_NUM,
# 否则程序会异常中断。
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
-
+
exe = fluid.Executor(place)
-
- data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+
+ data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
+
test_program = fluid.default_main_program().clone(for_test=True)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
-
+
exe.run(fluid.default_startup_program())
- build_strategy = fluid.BuildStrategy()
- build_strategy.fuse_all_reduce_ops = True
- compiled_train_prog = compiler.CompiledProgram(
- fluid.default_main_program()).with_data_parallel(
- loss_name=loss.name, build_strategy=build_strategy)
- # 注意:如果此处不设置share_vars_from=compiled_train_prog,测试过程中用的参数与训练使用的参数是不一致
- compiled_test_prog = compiler.CompiledProgram(
- test_program).with_data_parallel(
- share_vars_from=compiled_train_prog)
+ compiled_train_prog = fluid.CompiledProgram(
+ fluid.default_main_program()).with_data_parallel(
+ loss_name=loss.name, places=parallel_places)
+ # 注意:如果此处不设置share_vars_from=compiled_train_prog,
+ # 测试过程中用的参数与训练使用的参数是不一致的
+ compiled_test_prog = fluid.CompiledProgram(
+ test_program).with_data_parallel(
+ share_vars_from=compiled_train_prog,
+ places=parallel_places)
train_data = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(compiled_train_prog,
- feed={"X": train_data},
- fetch_list=[loss.name])
+ feed={"X": train_data},
+ fetch_list=[loss.name])
test_data = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(compiled_test_prog,
- feed={"X": test_data},
- fetch_list=[loss.name])
\ No newline at end of file
+ feed={"X": test_data},
+ fetch_list=[loss.name])
\ No newline at end of file
diff --git a/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst b/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst
index 18f2ef957e98cfb9270509eadd9bc2b47c823a5f..29cf9b2a43c30fe7c6dd47af702eef29f9a863ec 100644
--- a/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/DataFeedDesc_cn.rst
@@ -3,10 +3,13 @@
DataFeedDesc
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.DataFeedDesc(proto_file)
+:api_attr: 声明式编程模式(静态图)
+
+
+
描述训练数据的格式。输入是一个文件路径名,其内容是protobuf message。
可以参考 :code:`paddle/fluid/framework/data_feed.proto` 查看我们如何定义message
diff --git a/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst b/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst
index 944974858402570fef9d3c4a6e795fac1ea3ab8f..1151d922e5db990139ac616e2aca2d617c1931f5 100644
--- a/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/DataFeeder_cn.rst
@@ -3,10 +3,13 @@
DataFeeder
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.DataFeeder(feed_list, place, program=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
``DataFeeder`` 负责将reader(读取器)返回的数据转成一种特殊的数据结构,使它们可以输入到 ``Executor`` 和 ``ParallelExecutor`` 中。
diff --git a/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst b/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst
index 7f4e94850a0016bd7bc6dbe72bf554a79000bcc9..398aceb246145d47be589ac166159b4259bedc87 100644
--- a/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/DistributeTranspilerConfig_cn.rst
@@ -6,6 +6,9 @@ DistributeTranspilerConfig
.. py:class:: paddle.fluid.DistributeTranspilerConfig
+
+
+
.. py:attribute:: slice_var_up (bool)
为多个Pserver(parameter server)将tensor切片, 默认为True。
diff --git a/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst b/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst
index 8876419cd8271936283b7a45cd2aa2858941bdb6..ab4fb17c706485902823d4bdc04c7e6c30498944 100644
--- a/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/DistributeTranspiler_cn.rst
@@ -6,6 +6,9 @@ DistributeTranspiler
.. py:class:: paddle.fluid.DistributeTranspiler (config=None)
+
+
+
该类可以把fluid program转变为分布式数据并行计算的program, 有PServer和NCCL2两种模式。
在Pserver(全称:parameter server)模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于parameter server的分布式架构(即PServer,参数服务器)来进行训练的program。
在NCCL2模式下, 通过 ``transpile`` 将用于单机训练的 ``program`` 转译为可用于NCCL2的分布式架构来进行训练的program。在NCCL2模式下,transpiler会在 ``startup_program`` 中附加一个 ``NCCL_ID`` 广播
diff --git a/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst b/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst
index 4d6cc28fa05d6a14f74650d6078a98ba06fb9d5c..25b623fbffda98bd12aea2579d882f5e33d97a43 100644
--- a/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/ExecutionStrategy_cn.rst
@@ -3,10 +3,13 @@
ExecutionStrategy
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.ExecutionStrategy
+:api_attr: 声明式编程模式(静态图)
+
+
+
通过设置 ``ExecutionStrategy`` 中的选项,用户可以对执行器的执行配置进行调整,比如设置执行器中线程池的大小等。
返回:初始化后的ExecutionStrategy的实例
@@ -33,7 +36,7 @@ ExecutionStrategy
train_exe = fluid.ParallelExecutor(use_cuda=False,
loss_name=avg_loss.name,
- exec_strategy=exec_strategy)
+ exec_strategy=exec_strategy)
.. py:attribute:: num_iteration_per_drop_scope
diff --git a/doc/fluid/api_cn/fluid_cn/Executor_cn.rst b/doc/fluid/api_cn/fluid_cn/Executor_cn.rst
index d668825c5b716f46affec5db405301379a90cf5e..7a2053a891c88a99d7336528f33b7ef87bb25f11 100644
--- a/doc/fluid/api_cn/fluid_cn/Executor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/Executor_cn.rst
@@ -4,14 +4,17 @@ Executor
-------------------------------
-**注意:该API仅支持【静态图】模式**
-.. py:class:: paddle.fluid.Executor (place)
+.. py:class:: paddle.fluid.Executor (place=None)
-Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传入设备。
+:api_attr: 声明式编程模式(静态图)
+
+
+
+Executor支持单GPU、多GPU以及CPU运行。
参数:
- - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。
+ - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设备会设置成 `CPUPlace()` ,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)` 。默认值为None。
返回:初始化后的 ``Executor`` 对象
@@ -26,9 +29,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
import numpy
import os
- use_cuda = True
- place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
- exe = fluid.Executor(place)
+ # 显式设置运行设备
+ # use_cuda = True
+ # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ # exe = fluid.Executor(place)
+
+ # 如果不显式设置运行设备,PaddlePaddle会设置默认运行设备
+ exe = fluid.Executor()
train_program = fluid.Program()
startup_program = fluid.Program()
@@ -55,8 +62,13 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
# 否则fluid会把逻辑核的所有数目设为CPU_NUM,
# 在这种情况下,输入的batch size应大于CPU_NUM,
# 否则程序会异常中断。
- if not use_cuda:
- os.environ['CPU_NUM'] = str(2)
+
+ # 显式设置运行设备
+ # if not use_cuda:
+ # os.environ['CPU_NUM'] = str(2)
+
+ # 未显式设置运行设备且安装的Paddle为CPU版本
+ os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
@@ -84,7 +96,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
exe.close()
-.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True,use_program_cache=False)
+.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True)
执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。
@@ -95,9 +107,10 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
- **feed_var_name** (str) – 该参数表示数据输入算子(feed operator)的输入变量名称。默认为:"feed"。
- **fetch_var_name** (str) – 该参数表示结果获取算子(fetch operator)的输出变量名称。默认为:"fetch"。
- **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。
- - **return_numpy** (bool) – 该参数表示是否将返回返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。
+ - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。
- **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、feed变量名和fetch_list变量)始终不变。默认为:False。
-
+ - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list中指定的变量)进行合并。如果 ``return_merged`` 设为False,返回值类型是一个Tensor的二维列表( ``return_numpy`` 设为False时)或者一个numpy.ndarray的二维列表( ``return_numpy`` 设为True时)。如果 ``return_merged`` 设为True,返回值类型是一个Tensor的一维列表( ``return_numpy`` 设为False时)或者一个numpy.ndarray的一维列表( ``return_numpy`` 设为True时)。更多细节请参考示例代码2。如果返回的计算结果是变长的,请设置 ``return_merged`` 为False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为False。
+
返回:返回fetch_list中指定的变量值
返回类型:List
@@ -107,7 +120,7 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。
-**示例代码**
+**示例代码1**
.. code-block:: python
@@ -131,6 +144,62 @@ Executor支持单GPU、多GPU以及CPU运行。在Executor构造时,需要传
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
+
+**示例代码2**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ # 创建Executor对象
+ place = fluid.CUDAPlace(0)
+ exe = fluid.Executor(place)
+ data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+ class_dim = 2
+ prediction = fluid.layers.fc(input=data, size=class_dim)
+ loss = fluid.layers.mean(prediction)
+ adam = fluid.optimizer.Adam()
+ adam.minimize(loss)
+ # 运行且仅运行一次startup program
+ exe.run(fluid.default_startup_program())
+ build_strategy = fluid.BuildStrategy()
+ binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
+ loss_name=loss.name, build_strategy=build_strategy)
+ batch_size = 6
+ x = np.random.random(size=(batch_size, 1)).astype('float32')
+ # 1) 设置 return_merged 参数为False以获取不合并的计算结果:
+ unmerged_prediction, = exe.run(binary, feed={'X': x},
+ fetch_list=[prediction.name],
+ return_merged=False)
+ # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。
+ # 输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用
+ # 的GPU卡数之商。
+ print("The unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape))
+ print(unmerged_prediction)
+ # 2) 设置 return_merged 参数为True以获取合并的计算结果:
+ merged_prediction, = exe.run(binary, feed={'X': x},
+ fetch_list=[prediction.name],
+ return_merged=True)
+ # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(6, class_dim)。输出结果
+ # 中第一个维度值代表batch_size值。
+ print("The merged prediction shape: {}".format(np.array(merged_prediction).shape))
+ print(merged_prediction)
+ # 输出:
+ # The unmerged prediction shape: (2, 3, 2)
+ # [array([[-0.37620035, -0.19752218],
+ # [-0.3561043 , -0.18697084],
+ # [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354],
+ # [-0.49041364, -0.25748932],
+ # [-0.44331917, -0.23276259]], dtype=float32)]
+ # The merged prediction shape: (6, 2)
+ # [[-0.37789783 -0.19921964]
+ # [-0.3577645 -0.18863106]
+ # [-0.24274671 -0.12814042]
+ # [-0.24635398 -0.13003758]
+ # [-0.49232286 -0.25939852]
+ # [-0.44514108 -0.2345845 ]]
+
+
.. py:method:: infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100)
infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在分布式训练中,推送梯度的操作将在infer_from_dataset中被禁用。infer_from_dataset()可以非常容易地用于多线程中的评估。
diff --git a/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst b/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst
index 6127ccf69939d1a3adb9d2a56538bc950af4bddf..4fa9be57d50dbec6f1137f7cf04fdc83f349af79 100644
--- a/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/LoDTensorArray_cn.rst
@@ -5,6 +5,9 @@ LoDTensorArray
.. py:class:: paddle.fluid.LoDTensorArray
+
+
+
LoDTensorArray是由LoDTensor组成的数组,支持"[]"运算符、len()函数和for迭代等。
**示例代码**
diff --git a/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst b/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst
index 3897fafac9e68c23a0d758e990ea8591804eebb9..2eaf09eecf00e779296f282482324e4b2a8385cc 100644
--- a/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/LoDTensor_cn.rst
@@ -6,6 +6,9 @@ LoDTensor
.. py:class:: paddle.fluid.LoDTensor
+
+
+
LoDTensor是一个具有LoD(Level of Details)信息的张量(Tensor),可用于表示变长序列,详见 :ref:`cn_user_guide_lod_tensor` 。
LoDTensor可以通过 ``np.array(lod_tensor)`` 方法转换为numpy.ndarray。
diff --git a/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst b/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst
index 1d2f405a8ebf818fc68879efffa4fc44a177c076..8e391956ed24e66a6fb736dc4165b14a7e734cad 100644
--- a/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/ParallelExecutor_cn.rst
@@ -3,10 +3,13 @@
ParallelExecutor
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.ParallelExecutor(use_cuda, loss_name=None, main_program=None, share_vars_from=None, exec_strategy=None, build_strategy=None, num_trainers=1, trainer_id=0, scope=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
``ParallelExecutor`` 是 ``Executor`` 的一个升级版本,可以支持基于数据并行的多节点模型训练和测试。如果采用数据并行模式, ``ParallelExecutor`` 在构造时会将参数分发到不同的节点上,并将输入的 ``Program`` 拷贝到不同的节点,在执行过程中,各个节点独立运行模型,将模型反向计算得到的参数梯度在多个节点之间进行聚合,之后各个节点独立地进行参数的更新。如果使用GPU运行模型,即 ``use_cuda=True`` ,节点指代GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的GPU资源,用户也可以通过环境变量设置可用的GPU资源,例如:希望使用GPU0、GPU1计算,export CUDA_VISIBLE_DEVICES=0,1;如果在CPU上进行操作,即 ``use_cuda=False`` ,节点指代CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM ,并将该值设置为CPU设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1**。
参数:
diff --git a/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst b/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst
index 43975efd6a3b0723ad7ce51d3399b3de99d8dc5e..641ed94ab2d0b52e9ef2de4cdd783f61e5df672f 100644
--- a/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/ParamAttr_cn.rst
@@ -5,7 +5,14 @@ ParamAttr
-------------------------------
-.. py:class:: paddle.fluid.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None, do_model_average=False)
+.. py:class:: paddle.fluid.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False)
+
+
+
+
+.. note::
+ 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、
+ :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。
创建一个参数属性对象,用户可设置参数的名称、初始化方式、学习率、正则化规则、是否需要训练、梯度裁剪方式、是否做模型平均等属性。
@@ -13,9 +20,10 @@ ParamAttr
- **name** (str,可选) - 参数的名称。默认值为None,表示框架自动创建参数的名称。
- **initializer** (Initializer,可选) - 参数的初始化方式。默认值为None,表示权重参数采用Xavier初始化方式,偏置参数采用全0初始化方式。
- **learning_rate** (float) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以learning rate schedule的系数。
- - **regularizer** (WeightDecayRegularizer,可选) - 正则化因子。默认值为None,表示没有正则化因子。
+ - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、
+ :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也
+ 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。
- **trainable** (bool) - 参数是否需要训练。默认值为True,表示需要训练。
- - **gradient_clip** (BaseGradientClipAttr,可选) - 梯度裁剪方式。默认值为None,表示不需要梯度裁剪。
- **do_model_average** (bool) - 是否做模型平均。默认值为False,表示不做模型平均。
返回: 表示参数属性的对象。
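+
+结合上面的注解,下面给出一个示意性示例:在 ``ParamAttr`` 中设置名称、学习率与正则化,并在初始化 ``optimizer`` 时设置梯度裁剪(假设所用版本的 ``optimizer`` 构造函数支持 ``grad_clip`` 参数,组网结构仅为示意):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    x = fluid.data(name='x', shape=[None, 13], dtype='float32')
+    # 通过 ParamAttr 为 weight 设置名称、学习率和 L2 正则化
+    w_param = fluid.ParamAttr(name="fc_weight",
+                              learning_rate=0.5,
+                              regularizer=fluid.regularizer.L2Decay(1.0),
+                              trainable=True)
+    y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param)
+    loss = fluid.layers.mean(y_predict)
+
+    # 梯度裁剪推荐在初始化 optimizer 时设置(假设:构造函数支持 grad_clip 参数)
+    clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
+    sgd = fluid.optimizer.SGD(learning_rate=0.01, grad_clip=clip)
+    sgd.minimize(loss)
+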
diff --git a/doc/fluid/api_cn/fluid_cn/Program_cn.rst b/doc/fluid/api_cn/fluid_cn/Program_cn.rst
index 3fc5c40939c615a2b15db8144ed28ff6c05e0e52..2a611c7b0e913f8ed6ceb13ad88566c101dc8ef6 100644
--- a/doc/fluid/api_cn/fluid_cn/Program_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/Program_cn.rst
@@ -5,6 +5,9 @@ Program
.. py:class:: paddle.fluid.Program
+
+
+
**注意:默认情况下,Paddle Fluid内部默认含有** :ref:`cn_api_fluid_default_startup_program` **和** :ref:`cn_api_fluid_default_main_program` **,它们共享参数。** :ref:`cn_api_fluid_default_startup_program` **只运行一次来初始化参数,** :ref:`cn_api_fluid_default_main_program` **在每个mini batch中运行并更新权重。**
Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block` ,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block` ,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_
@@ -57,13 +60,12 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
import paddle.fluid as fluid
prog = fluid.default_main_program()
- a = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False)
- c = fluid.layers.fc(a, size=3)
+ x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False)
+ pred = fluid.layers.fc(x, size=3)
prog_string = prog.to_string(throw_on_error=True, with_details=False)
prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)
- print(prog_string)
- print("\n =============== with_details =============== \n")
- print(prog_string_with_details)
+ print("program string without detail: {}".format(prog_string))
+ print("program string with detail: {}".format(prog_string_with_details))
.. py:method:: clone(for_test=False)
@@ -82,16 +84,19 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
**代码示例**
- .. code-block:: python
+ ::
- import paddle.fluid as fluid
- ## 我们推荐在使用 Optimizer前使用clone()接口
- test_program = fluid.default_main_program().clone(for_test=True)
- optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
- optimizer.minimize()
+ import paddle.fluid as fluid
+ img = fluid.layers.data(name='image', shape=[784])
+ pred = fluid.layers.fc(input=img, size=10, act='relu')
+ loss = fluid.layers.mean(pred)
+ ## 我们推荐在使用 Optimizer前使用clone()接口
+ test_program = fluid.default_main_program().clone(for_test=True)
+ optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
+ optimizer.minimize(loss)
参数:
- - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP
+ - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP,默认值为False
返回:当 ``for_test=True`` 时返回一个新的、仅包含当前Program前向内容的Program。否则返回一个新的,和当前Program完全相同的Program
@@ -150,7 +155,7 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
- test_program = train_program.clone(for_test=False)
+ test_program = train_program.clone(for_test=True)
print_prog(test_program)
# 由于需要使训练和测试参数共享,我们需要使用训练的 ``startup_program``
@@ -182,7 +187,8 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
- def network(is_test):
+
+ def network():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
@@ -192,19 +198,19 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
avg_loss = fluid.layers.mean(loss)
return avg_loss
-
train_program_2 = fluid.Program()
startup_program_2 = fluid.Program()
test_program_2 = fluid.Program()
with fluid.program_guard(train_program_2, startup_program_2):
with fluid.unique_name.guard():
- sgd = fluid.optimizer.SGD(learning_rate=1e-3)
- sgd.minimize(avg_loss)
+ avg_loss = network()
+ sgd = fluid.optimizer.SGD(learning_rate=1e-3)
+ sgd.minimize(avg_loss)
# 不使用测试阶段的启动程序
- with fluid.program_guard(test_program_2, fluid.Program()):
+ with fluid.program_guard(test_program_2, startup_program_2):
with fluid.unique_name.guard():
- loss = network(is_test=True)
- print(test_program_2)
+ avg_loss = network()
+ print_prog(test_program_2)
上边两个代码片段生成和打印的Program是一样的。
@@ -268,24 +274,7 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
.. py:attribute:: random_seed
-**注意:必须在相关OP被添加之前设置。例如**
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
-
- prog = fluid.default_main_program()
- random_seed = prog.random_seed
- x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
-
- # 这里我们必须要在fluid.layers.dropout之前设置random_seed
- print(random_seed)
- prog.random_seed = 1
- z_var = fluid.layers.dropout(x_var, 0.7)
-
- print(prog.random_seed)
+**注意:必须在相关OP被添加之前设置。**
程序中随机运算符的默认随机种子。0意味着随机生成随机种子。
@@ -301,12 +290,16 @@ Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构
prog = fluid.default_main_program()
random_seed = prog.random_seed
+ x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
print(random_seed)
- prog.random_seed = 1
- print(prog.random_seed)
-
## 0
## 默认的random seed是 0
+
+ # 这里我们必须要在fluid.layers.dropout之前设置random_seed
+ prog.random_seed = 1
+ z_var = fluid.layers.dropout(x_var, 0.7)
+
+ print(prog.random_seed)
## 1
## 修改后random seed变成了 1
diff --git a/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst b/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst
index da5698d18fff154660774cefabf645d4f7ff141d..0cae5aac94cd27f7e3432a19b295f2b03ef606b8 100644
--- a/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/Tensor_cn.rst
@@ -5,6 +5,9 @@ Tensor
.. py:function:: paddle.fluid.Tensor
+
+
+
Tensor用于表示多维张量,可以通过 ``np.array(tensor)`` 方法转换为numpy.ndarray。
**示例代码**
diff --git a/doc/fluid/api_cn/fluid_cn/Variable_cn.rst b/doc/fluid/api_cn/fluid_cn/Variable_cn.rst
index 77e282afbccd5fa701892ae5c0dc286a1465d68f..83af840cce8702bc2af188a1cab30f3561049d98 100644
--- a/doc/fluid/api_cn/fluid_cn/Variable_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/Variable_cn.rst
@@ -5,6 +5,9 @@ Variable
.. py:class:: paddle.fluid.Variable
+
+
+
**注意:**
**1. 请不要直接调用** `Variable` **的构造函数,因为这会造成严重的错误发生!**
@@ -142,7 +145,7 @@ Variable
**参数:**
- - **backward_strategy**: ( :ref:`cn_api_fluid_dygraph_BackwardStrategy` ) 使用何种 :ref:`cn_api_fluid_dygraph_BackwardStrategy` 聚合反向的梯度
+ - **retain_graph** (bool,可选) – 该参数用于确定反向梯度更新完成后反向梯度计算图是否需要保留(retain_graph为True则保留反向梯度计算图)。若用户打算在执行完该方法( :code:`backward` )后,继续向之前已构建的计算图中添加更多的Op,则需要设置 :code:`retain_graph` 值为True(这样才会保留之前计算得到的梯度)。可以看出,将 :code:`retain_graph` 设置为False可降低内存的占用。默认值为False。
返回:无
@@ -150,23 +153,20 @@ Variable
**示例代码**
.. code-block:: python
- import paddle.fluid as fluid
import numpy as np
-
+ import paddle
+ paddle.disable_static()
x = np.ones([2, 2], np.float32)
- with fluid.dygraph.guard():
- inputs2 = []
- for _ in range(10):
- tmp = fluid.dygraph.base.to_variable(x)
- # 如果这里我们不为输入tmp设置stop_gradient=False,那么后面loss2也将因为这个链路都不需要梯度
- # 而不产生梯度
- tmp.stop_gradient=False
- inputs2.append(tmp)
- ret2 = fluid.layers.sums(inputs2)
- loss2 = fluid.layers.reduce_sum(ret2)
- backward_strategy = fluid.dygraph.BackwardStrategy()
- backward_strategy.sort_sum_gradient = True
- loss2.backward(backward_strategy)
+ inputs = []
+ for _ in range(10):
+ tmp = paddle.to_tensor(x)
+ # 如果这里我们不为输入tmp设置stop_gradient=False,那么后面loss也将因为这个链路都不需要梯度
+ # 而不产生梯度
+ tmp.stop_gradient=False
+ inputs.append(tmp)
+ ret = paddle.sums(inputs)
+ loss = paddle.reduce_sum(ret)
+ loss.backward()
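+
+下面给出一个 ``retain_graph`` 用法的最小示例(仅为示意,其中假设当前版本的动态图在设置 ``retain_graph=True`` 后,可以继续基于已有的中间结果构建新的loss并再次反向):
+
+.. code-block:: python
+
+    import numpy as np
+    import paddle
+    paddle.disable_static()
+    x = paddle.to_tensor(np.ones([2, 2], np.float32), stop_gradient=False)
+    y = x * x
+    loss1 = paddle.reduce_sum(y)
+    # retain_graph=True:保留反向计算图,便于继续向计算图中添加Op
+    loss1.backward(retain_graph=True)
+    # 继续基于 y 构建新的loss并反向(假设性用法)
+    loss2 = paddle.reduce_sum(y * 2)
+    loss2.backward()
+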
.. py:method:: gradient()
@@ -199,9 +199,7 @@ Variable
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
- backward_strategy = fluid.dygraph.BackwardStrategy()
- backward_strategy.sort_sum_gradient = True
- loss2.backward(backward_strategy)
+ loss2.backward()
print(loss2.gradient())
# example2: 返回tuple of ndarray
@@ -245,9 +243,7 @@ Variable
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
- backward_strategy = fluid.dygraph.BackwardStrategy()
- backward_strategy.sort_sum_gradient = True
- loss2.backward(backward_strategy)
+ loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
@@ -348,6 +344,7 @@ Variable
.. code-block:: python
import paddle.fluid as fluid
+ import numpy as np
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
@@ -363,9 +360,9 @@ Variable
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward()
- # 可以发现这里linear的参数变成了
- assert (linear.weight.gradient() == 0).all()
- assert (out1.gradient() == 0).all()
+ # 可以发现这里linear的参数梯度变成了None
+ assert linear.weight.gradient() is None
+ assert out1.gradient() is None
.. py:attribute:: persistable
diff --git a/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst b/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst
index 88946bb7ae93ddbd01da63fc16769f53d16a8023..d17b0380c5f8c5903b75926f3ec326d4d3726320 100644
--- a/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/WeightNormParamAttr_cn.rst
@@ -3,10 +3,16 @@
WeightNormParamAttr
-------------------------------
-**注意:该API仅支持【静态图】模式**
-.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None, do_model_average=False)
+.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
+.. note::
+ 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、
+ :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。
该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_
@@ -15,9 +21,10 @@ WeightNormParamAttr
- **name** (None|str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认为None。
- **initializer** (Initializer) - 初始化参数方法,例如 ``initializer = fluid.initializer.ConstantInitializer(1.0)`` 。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。
- **learning_rate** (float32) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。
- - **regularizer** (WeightDecayRegularizer) - 正则化方法,例如 ``regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1)`` 。默认为None,如果为None则对权重不做正则化。
+ - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、
+ :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也
+ 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。
- **trainable** (bool) - 可选,指明参数是否可训练,默认为True。
- - **gradient_clip** - 梯度裁剪(Gradient Clipping)的方法,例如 ``gradient_clip = fluid.clip.GradientClipByNorm(clip_norm=2.0))`` 。默认为None,如果为None则对权重不做裁剪。
- **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为False。
@@ -36,7 +43,6 @@ WeightNormParamAttr
learning_rate=1.0,
regularizer=fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1),
trainable=True,
- gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0),
do_model_average=False))
diff --git a/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst
index 973547e0adc5f082dbb1c3edf29681f7dc15e2fe..124973fc786ad84108f2478809a735e6ce45a081 100644
--- a/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/cpu_places_cn.rst
@@ -5,6 +5,9 @@ cpu_places
.. py:function:: paddle.fluid.cpu_places(device_count=None)
+
+
+
该接口创建 ``device_count`` 个 ``fluid.CPUPlace`` 对象,并返回所创建的对象列表。
如果 ``device_count`` 为 ``None``,则设备数目将由环境变量 ``CPU_NUM`` 确定。如果未设置 ``CPU_NUM`` 环境变量,则设备数目会默认设为1,也就是说, ``CPU_NUM=1``。
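+
+下面的示例展示 ``device_count`` 与环境变量 ``CPU_NUM`` 的关系(仅为示意):
+
+.. code-block:: python
+
+    import os
+    import paddle.fluid as fluid
+
+    os.environ['CPU_NUM'] = '4'          # 未指定device_count时,由CPU_NUM决定
+    places = fluid.cpu_places()          # 返回4个fluid.CPUPlace
+    places_2 = fluid.cpu_places(2)       # 显式指定device_count,返回2个fluid.CPUPlace
+    print(len(places), len(places_2))    # 4 2
+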
diff --git a/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst b/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst
index 1a6884d1f042237ddab5b22ac193965a3634e7c6..386b0632f1a0256e8cad62c2143a05c7684ded1d 100644
--- a/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/create_lod_tensor_cn.rst
@@ -6,6 +6,9 @@ create_lod_tensor
.. py:function:: paddle.fluid.create_lod_tensor(data, recursive_seq_lens, place)
+
+
+
从一个numpy数组、list或LoDTensor创建一个新的LoDTensor。
具体实现方法如下:
diff --git a/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst b/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst
index ee3999183571ef62e41aaa97f0734434e22628f1..afe15fe1d4c92d7422d497cecc29ed10c2f4b14f 100644
--- a/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/create_random_int_lodtensor_cn.rst
@@ -4,10 +4,13 @@
create_random_int_lodtensor
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, high)
+:api_attr: 声明式编程模式(静态图)
+
+
+
创建一个包含随机整数的LoDTensor。
具体实现方法如下:
diff --git a/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst
index d4cf156130daffd4b9fc5281a760b3b25d6b7ede..9c3955b528ce692e7c0d1ba3f6da0431080a7272 100644
--- a/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/cuda_pinned_places_cn.rst
@@ -8,6 +8,9 @@ cuda_pinned_places
+
+
+
该接口创建 ``device_count`` 个 ``fluid.CUDAPinnedPlace`` ( fluid. :ref:`cn_api_fluid_CUDAPinnedPlace` ) 对象,并返回所创建的对象列表。
如果 ``device_count`` 为 ``None``,实际设备数目将由当前任务中使用的GPU设备数决定。用户可通过以下2种方式设置任务可用的GPU设备:
diff --git a/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst b/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst
index babb94512a7649449dee100a65705badf7379d62..b0294e9cb5b2df17d525a053c68005d0355bbe2e 100644
--- a/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/cuda_places_cn.rst
@@ -5,6 +5,9 @@ cuda_places
.. py:function:: paddle.fluid.cuda_places(device_ids=None)
+
+
+
.. note::
多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。
diff --git a/doc/fluid/api_cn/fluid_cn/data_cn.rst b/doc/fluid/api_cn/fluid_cn/data_cn.rst
index 67f99be46b3645dccff53a734b8d506c7092d989..14a6ab6ea1d94dcdc3586417ef9c85db98783c74 100644
--- a/doc/fluid/api_cn/fluid_cn/data_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/data_cn.rst
@@ -3,10 +3,12 @@
data
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.data(name, shape, dtype='float32', lod_level=0)
+
+
+
该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量
注意:
diff --git a/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst b/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst
index 6a2eeb42130baa1b71cc95e6faf1d50720693410..4759fafea72a09002fc9e497baeb99983b2c6218 100644
--- a/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/default_main_program_cn.rst
@@ -6,6 +6,9 @@ default_main_program
.. py:function:: paddle.fluid.default_main_program()
+
+
+
此接口可以获取当前用于存储op和variable描述信息的 ``default main program``
``fluid.layers`` 接口中添加的op和variable会存储在 ``default main program`` 中
@@ -28,8 +31,8 @@ default_main_program
import paddle.fluid as fluid
#示例网络:
- data = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
+ label = fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
diff --git a/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst b/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst
index 2c25eb00f74484682af5495e2cc386a1d67690bf..bfc247c29b6a952d59e8ac524a558f32843cb536 100644
--- a/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/default_startup_program_cn.rst
@@ -10,6 +10,9 @@ default_startup_program
+
+
+
该函数可以获取默认/全局 startup :ref:`cn_api_fluid_Program` (初始化启动程序)。
:ref:`cn_api_fluid_layers` 中的函数会新建参数或 :ref:`cn_api_paddle_data_reader_reader` (读取器) 或 `NCCL `_ 句柄作为全局变量。
diff --git a/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst
new file mode 100755
index 0000000000000000000000000000000000000000..7d369cc9740652a4d6d4c5a23ff723fdfa0dbdc4
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst
@@ -0,0 +1,36 @@
+.. _cn_api_fluid_device_guard:
+
+device_guard
+-------------------------------
+
+**注意:该API仅支持【静态图】模式**
+
+.. py:function:: paddle.fluid.device_guard(device=None)
+
+一个用于指定OP运行设备的上下文管理器。
+
+参数:
+ - **device** (str|None) – 指定上下文中使用的设备。它可以是'cpu'或者'gpu',当它被设置为'cpu'或者'gpu'时,创建在该上下文中的OP将被运行在CPUPlace或者CUDAPlace上。若设置为'gpu',同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致。默认值:None,在该上下文中的OP将被自动地分配设备。
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ support_gpu = fluid.is_compiled_with_cuda()
+ place = fluid.CPUPlace()
+ if support_gpu:
+ place = fluid.CUDAPlace(0)
+ # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
+ data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
+ data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
+ shape = fluid.layers.shape(data2)
+ with fluid.device_guard("cpu"):
+ # Ops created here will be placed on CPUPlace
+ shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
+ with fluid.device_guard('gpu'):
+ # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
+ out = fluid.layers.crop_tensor(data1, shape=shape)
+ exe = fluid.Executor(place)
+ exe.run(fluid.default_startup_program())
+ result = exe.run(fetch_list=[out])
diff --git a/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst b/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..59dc22b7b491cf59f13fe9586f6d98bbaa86f00b
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/disable_dygraph_cn.rst
@@ -0,0 +1,22 @@
+.. _cn_api_fluid_disable_dygraph:
+
+disable_dygraph
+-------------------------------
+
+.. py:function:: paddle.fluid.disable_dygraph()
+
+该接口关闭动态图模式。
+
+返回:无
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ fluid.enable_dygraph() # Now we are in dygraph mode
+ print(fluid.in_dygraph_mode()) # True
+ fluid.disable_dygraph()
+ print(fluid.in_dygraph_mode()) # False
+
diff --git a/doc/fluid/api_cn/fluid_cn/embedding_cn.rst b/doc/fluid/api_cn/fluid_cn/embedding_cn.rst
index d8d5f0c356bc087f1bea6d64e8515ebe985adc51..ee70bead5caa9abd9db685f1709cae196ced9028 100644
--- a/doc/fluid/api_cn/fluid_cn/embedding_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/embedding_cn.rst
@@ -3,10 +3,13 @@
embedding
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32')
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,函数会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。
输出的Tensor的shape是在输入Tensor shape的最后一维后面添加了emb_size的维度。
diff --git a/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst b/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0df485bd856a4c70f3638db7c2f6b7470c143fe1
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/enable_dygraph_cn.rst
@@ -0,0 +1,25 @@
+.. _cn_api_fluid_enable_dygraph:
+
+enable_dygraph
+-------------------------------
+
+.. py:function:: paddle.fluid.enable_dygraph(place=None)
+
+该接口打开动态图模式。
+
+参数:
+ - **place** (fluid.CPUPlace 或 fluid.CUDAPlace,可选) - 执行动态图的设备。若为None,则设备根据paddle的编译方式决定。默认值为 ``None``。
+
+返回:无
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ fluid.enable_dygraph() # Now we are in dygraph mode
+ print(fluid.in_dygraph_mode()) # True
+ fluid.disable_dygraph()
+ print(fluid.in_dygraph_mode()) # False
+
diff --git a/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst b/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e0323cf1f3e52c6c927ad7944a7e26a0a051442f
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/get_flags_cn.rst
@@ -0,0 +1,21 @@
+.. _cn_api_fluid_get_flags:
+
+get_flags
+-------------------------------
+
+.. py:function:: paddle.fluid.get_flags(flags)
+
+用于获取Paddle框架中环境变量FLAGS的当前值。
+
+参数:
+ - **flags** (list|tuple|str) - 需要获取的环境变量FLAGS的名称。
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
+ res = fluid.get_flags(flags)
+ print(res)
+ # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}
diff --git a/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst b/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst
index 7f649449c9eb386df7929b72f118b032f23fe6e0..86031eedc8c1c8f8fc9083054b189194dba6a009 100644
--- a/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/global_scope_cn.rst
@@ -3,10 +3,13 @@
global_scope
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.global_scope()
+:api_attr: 声明式编程模式(静态图)
+
+
+
获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。
返回:全局/默认作用域实例
diff --git a/doc/fluid/api_cn/fluid_cn/gradients_cn.rst b/doc/fluid/api_cn/fluid_cn/gradients_cn.rst
index 7e2e7d4fd635de3332aea4f293951567dd66c79c..b5813a0a1c6b73aaecff4bb3939fbc3ef8d1b594 100644
--- a/doc/fluid/api_cn/fluid_cn/gradients_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/gradients_cn.rst
@@ -3,10 +3,13 @@
gradients
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.gradients(targets, inputs, target_gradients=None, no_grad_set=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
将目标梯度反向传播到输入。
参数:
@@ -26,7 +29,7 @@ gradients
import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[2,8,8], dtype='float32')
+ x = fluid.data(name='x', shape=[None,2,8,8], dtype='float32')
x.stop_gradient=False
y = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
y = fluid.layers.relu(y)
diff --git a/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst b/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst
index 528dac5988992ad5e5d0e32bf28e0c49af4fa134..06c960ce5c3debdc422d8098744b5e7ecaa73bb5 100644
--- a/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/in_dygraph_mode_cn.rst
@@ -5,6 +5,9 @@ in_dygraph_mode
.. py:function:: paddle.fluid.in_dygraph_mode()
+
+
+
该接口检查程序是否在动态图模式中运行。
可以通过 ``fluid.dygraph.guard`` 接口开启动态图模式。
@@ -16,11 +19,11 @@ in_dygraph_mode
.. code-block:: python
- from __future__ import print_function
import paddle.fluid as fluid
- if fluid.in_dygraph_mode():
- print('running in dygraph mode')
- else:
- print('not running in dygraph mode')
+
+ fluid.enable_dygraph() # 现在进入 dygraph 模式
+ print(fluid.in_dygraph_mode()) # True
+ fluid.disable_dygraph()
+ print(fluid.in_dygraph_mode()) # False
diff --git a/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst b/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst
index 58112a408b1d57275e69a37ca48ba1bf7e55db6f..5f2741e78783c432eb22fd82509e2f3ebf7c808e 100644
--- a/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/is_compiled_with_cuda_cn.rst
@@ -5,6 +5,9 @@ is_compiled_with_cuda
.. py:function:: paddle.fluid.is_compiled_with_cuda()
+
+
+
检查 ``whl`` 包是否可以被用来在GPU上运行模型
返回:支持gpu则为True,否则为False。
diff --git a/doc/fluid/api_cn/fluid_cn/load_cn.rst b/doc/fluid/api_cn/fluid_cn/load_cn.rst
index f353a2457bce5f46c08ce647c6c4836df090330c..a12a65fbed8eacfcf1f17246fc3ee3001dd81c70 100644
--- a/doc/fluid/api_cn/fluid_cn/load_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/load_cn.rst
@@ -5,6 +5,10 @@ load
.. py:function:: paddle.fluid.load(program, model_path, executor=None, var_list=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。
如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。
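+
+下面是一个与 :ref:`cn_api_fluid_save` 配合使用的简单示例(仅为示意,路径与组网均为假设):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    x = fluid.data(name="x", shape=[10, 10], dtype='float32')
+    y = fluid.layers.fc(x, 10)
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+
+    # 先保存,再从文件中加载参数和优化器信息
+    fluid.save(fluid.default_main_program(), "./test_path")
+    fluid.load(fluid.default_main_program(), "./test_path", exe)
+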
diff --git a/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst b/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst
index e5bb133e10cf5bbf59d609f661f0c38b134fe44b..944b78dc4e9cc84a2cabc3951d3091969f3dc763 100644
--- a/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/load_op_library_cn.rst
@@ -5,6 +5,10 @@ load_op_library
.. py:class:: paddle.fluid.load_op_library
+:api_attr: 声明式编程模式(静态图)
+
+
+
``load_op_library`` 用于自定义C++算子中,用来加载算子动态共享库。加载库后,注册好的算子及其Kernel实现将在PaddlePaddle主进程中可以被调用。 请注意,自定义算子的类型不能与框架中的现有算子类型相同。
参数:
diff --git a/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst b/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst
index 4b987db2c809c97859bdcc2c10cc5784489552e6..bc95cc3995e565d50cc846d613d449bb89d6e936 100644
--- a/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/memory_optimize_cn.rst
@@ -3,9 +3,12 @@
memory_optimize
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0, skip_grads=True)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略**
diff --git a/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst b/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst
index 7f7be417930571c326a6be93b8a051caa3266cef..bf17054ba22ba285d436886c5e7491c8447c0132 100644
--- a/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/name_scope_cn.rst
@@ -3,10 +3,13 @@
name_scope
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.name_scope(prefix=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该函数为operators生成不同的命名空间。该函数只用于调试和可视化,不建议用在其它方面。
diff --git a/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst b/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst
index 0d36a980ebe7aa4940b98d58b81ff91d997e0594..fbf08df2176015fbe1a50ffef5da07e2a958089c 100644
--- a/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/one_hot_cn.rst
@@ -5,6 +5,12 @@ one_hot
.. py:function:: paddle.fluid.one_hot(input, depth, allow_out_of_range=False)
+:alias_main: paddle.nn.functional.one_hot
+:alias: paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot
+:old_api: paddle.fluid.one_hot
+
+
+
该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。
输出的Tensor(或LoDTensor)的shape是在输入shape的最后一维后面添加了depth的维度。
diff --git a/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst
index 83c53aeab3e1791ca5f99fa1db623a24e465acdd..d1b9e68b08e74b52c029aa848ab5bd382cc36e9c 100644
--- a/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/program_guard_cn.rst
@@ -3,10 +3,13 @@
program_guard
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.program_guard(main_program, startup_program=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口应配合使用python的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。
``with`` 语句块中的fluid.layers下各接口将在新的main program(主程序)中添加operators(算子)和variables(变量)。
@@ -23,7 +26,7 @@ program_guard
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
- data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10, act='relu')
例如,当组网过程不需要startup_program初始化各变量时,可以传入一个临时的program。
@@ -36,5 +39,5 @@ program_guard
main_program = fluid.Program()
# 如果您不需要关心startup program,传入一个临时值即可
with fluid.program_guard(main_program, fluid.Program()):
- data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
diff --git a/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst b/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst
index 253401270e141585cb874aa5d02a2470c530ec3c..ef68a966501630d8a29aadee6622f2e16bfca447 100644
--- a/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/release_memory_cn.rst
@@ -3,8 +3,11 @@
release_memory
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.release_memory(input_program, skip_opt_set=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**从1.6版本开始此接口不再推荐使用,请不要在新写的代码中使用它,1.6+版本已默认开启更优的存储优化策略**
diff --git a/doc/fluid/api_cn/fluid_cn/require_version_cn.rst b/doc/fluid/api_cn/fluid_cn/require_version_cn.rst
index bcaa95f2baa853b55fbec35863c06018f1e22b08..19f14cb37fe9adc927141d647e61ddfa843bc3d2 100644
--- a/doc/fluid/api_cn/fluid_cn/require_version_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/require_version_cn.rst
@@ -4,6 +4,9 @@ require_version
-------------------------------
.. py:function:: paddle.fluid.require_version(min_version, max_version=None)
+
+
+
该接口用于检查已安装的飞桨版本是否介于[``min_version``, ``max_version``]之间(包含 ``min_version`` 和 ``max_version`` ),如果已安装的版本低于 ``min_version`` 或者高于 ``max_version`` ,将会抛出异常。该接口无返回值。
参数:
diff --git a/doc/fluid/api_cn/fluid_cn/save_cn.rst b/doc/fluid/api_cn/fluid_cn/save_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a15b23f2b5b1b798943dcb6d7ef49ce8fac229f1
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/save_cn.rst
@@ -0,0 +1,51 @@
+.. _cn_api_fluid_save:
+
+save
+-------------------------------
+
+
+.. py:function:: paddle.fluid.save(program, model_path)
+
+:api_attr: 声明式编程模式(静态图)
+:alias_main: paddle.save
+:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save
+:old_api: paddle.fluid.save
+
+
+
+该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。
+
+参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。
+
+优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。
+所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。
+
+网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。
+
+参数:
+ - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。
+ - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。
+
+返回: 无
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ x = fluid.data(name="x", shape=[10, 10], dtype='float32')
+ y = fluid.layers.fc(x, 10)
+ z = fluid.layers.fc(y, 10)
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ exe.run(fluid.default_startup_program())
+
+ fluid.save(fluid.default_main_program(), "./test_path")
+
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst
index 2bbbe08642c4303c4399db5f1c34aab9904e3280..df0566e1b7c3e4931bf69e3734773e4a333f4d57 100644
--- a/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst
+++ b/doc/fluid/api_cn/fluid_cn/scope_guard_cn.rst
@@ -3,10 +3,13 @@
scope_guard
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.scope_guard(scope)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口通过 python 的 ``with`` 语句切换作用域(scope)。
作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。
diff --git a/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst b/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a4f6fe1cd02bd2a691059b6732a04e757ab0304e
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/set_flags_cn.rst
@@ -0,0 +1,18 @@
+.. _cn_api_fluid_set_flags:
+
+set_flags
+-------------------------------
+
+.. py:function:: paddle.fluid.set_flags(flags)
+
+用于设置Paddle框架中环境变量FLAGS的值。
+
+参数:
+ - **flags** (dict) - 包含想要设置的环境变量FLAGS的名称和值的字典。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
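+
+可以配合 :ref:`cn_api_fluid_get_flags` 验证设置是否生效(仅为示意):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
+    # 读取刚刚设置的FLAGS,确认已生效
+    print(fluid.get_flags('FLAGS_eager_delete_tensor_gb'))
+    # {'FLAGS_eager_delete_tensor_gb': 1.0}
+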
diff --git a/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst b/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..023629cf753daf5ecfb29a1b1984fbd184604bc4
--- /dev/null
+++ b/doc/fluid/api_cn/fluid_cn/set_global_initializer_cn.rst
@@ -0,0 +1,42 @@
+.. _cn_api_fluid_set_global_initializer:
+
+set_global_initializer
+-------------------------------
+
+.. py:function:: paddle.fluid.set_global_initializer(weight_init, bias_init=None)
+
+该API用于设置Paddle框架中全局的参数初始化方法。该API只对位于其后的代码生效。
+
+模型参数是模型中weight和bias的统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。
+该API的设置仅对模型参数生效,对通过 :ref:`cn_api_fluid_layers_create_global_var` 、 :ref:`cn_api_fluid_layers_create_tensor` 等API创建的变量不会生效。
+
+如果创建网络层时还通过 ``param_attr`` 、 ``bias_attr`` 设置了初始化方式,这里的全局设置将不会生效,因为其优先级更低。
+
+参数:
+ - **weight_init** (Initializer) - 设置框架的全局的weight参数初始化方法。
+ - **bias_init** (Initializer,可选) - 设置框架的全局的bias参数初始化方法。默认:None。
+
+返回:无
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ fluid.set_global_initializer(fluid.initializer.Uniform(), fluid.initializer.Constant())
+ x = fluid.data(name="x", shape=[1, 3, 32, 32])
+
+ # conv1的weight参数是通过Uniform来初始化
+ # conv1的bias参数是通过Constant来初始化
+ conv1 = fluid.layers.conv2d(x, 5, 3)
+
+ # 如果同时设置了param_attr/bias_attr, 全局初始化将不会生效
+ # conv2的weight参数是通过Xavier来初始化
+ # conv2的bias参数是通过Normal来初始化
+ conv2 = fluid.layers.conv2d(conv1, 5, 3,
+ param_attr=fluid.initializer.Xavier(),
+ bias_attr=fluid.initializer.Normal())
+
+ # 取消全局参数初始化的设置
+ fluid.set_global_initializer(None)
\ No newline at end of file
diff --git a/doc/fluid/api_cn/framework_cn.rst b/doc/fluid/api_cn/framework_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..54cf4aa250eb480bcce81f7cb0a93a8b49df264e
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn.rst
@@ -0,0 +1,39 @@
+=======================
+paddle.framework
+=======================
+
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ framework_cn/append_backward_cn.rst
+ framework_cn/BuildStrategy_cn.rst
+ framework_cn/CompiledProgram_cn.rst
+ framework_cn/CPUPlace_cn.rst
+ framework_cn/create_global_var_cn.rst
+ framework_cn/create_parameter_cn.rst
+ framework_cn/CUDAPinnedPlace_cn.rst
+ framework_cn/CUDAPlace_cn.rst
+ framework_cn/default_main_program_cn.rst
+ framework_cn/default_startup_program_cn.rst
+    framework_cn/ExecutionStrategy_cn.rst
+ framework_cn/Executor_cn.rst
+ framework_cn/get_default_dtype_cn.rst
+ framework_cn/global_scope_cn.rst
+ framework_cn/gradients_cn.rst
+ framework_cn/manual_seed_cn.rst
+ framework_cn/get_cuda_rng_state_cn.rst
+ framework_cn/set_cuda_rng_state_cn.rst
+ framework_cn/name_scope_cn.rst
+ framework_cn/ParallelExecutor_cn.rst
+ framework_cn/ParamAttr_cn.rst
+ framework_cn/Print_cn.rst
+ framework_cn/Program_cn.rst
+ framework_cn/program_guard_cn.rst
+ framework_cn/py_func_cn.rst
+ framework_cn/scope_guard_cn.rst
+ framework_cn/set_default_dtype_cn.rst
+ framework_cn/Variable_cn.rst
+ framework_cn/WeightNormParamAttr_cn.rst
diff --git a/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst b/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..55d6084d53c71728e7dac4c1c56e913c828b2b5d
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/BuildStrategy_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_BuildStrategy:
+
+BuildStrategy
+-------------------------------
+:doc_source: paddle.fluid.compiler.BuildStrategy
+
+
diff --git a/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fbbe61cb7bee6054bc4416bbfca77b1ea9bde803
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/CPUPlace_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_CPUPlace:
+
+CPUPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CPUPlace
+
+
diff --git a/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..49e697a6d9a940ffb9c21695fc43143bf4fe531d
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/CUDAPinnedPlace_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_CUDAPinnedPlace:
+
+CUDAPinnedPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPinnedPlace
+
+
diff --git a/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst b/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d04858456cbc3e47d492ff1ef6126f1425ce3076
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/CUDAPlace_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_CUDAPlace:
+
+CUDAPlace
+-------------------------------
+:doc_source: paddle.fluid.core.CUDAPlace
+
+
diff --git a/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst b/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4332e00c7dad9a217401e22a1fa5cfb42817fe08
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/CompiledProgram_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_CompiledProgram:
+
+CompiledProgram
+-------------------------------
+:doc_source: paddle.fluid.compiler.CompiledProgram
+
+
diff --git a/doc/fluid/api_cn/framework_cn/ExecutionStrategy_cn.rst b/doc/fluid/api_cn/framework_cn/ExecutionStrategy_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..889c1c1ecd3f03b9a5ce00715e87c76eb4ed191f
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/ExecutionStrategy_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_ExecutionStrategy:
+
+ExecutionStrategy
+-------------------------------
+:doc_source: paddle.fluid.compiler.ExecutionStrategy
+
+
diff --git a/doc/fluid/api_cn/framework_cn/Executor_cn.rst b/doc/fluid/api_cn/framework_cn/Executor_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9597fcd5d704565e1caa32588bab0c3a52ca719f
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/Executor_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_Executor:
+
+Executor
+-------------------------------
+:doc_source: paddle.fluid.executor.Executor
+
+
diff --git a/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst b/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1fe55329fbfd3e67a47071271494600e79bc7f5c
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/ParallelExecutor_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_ParallelExecutor:
+
+ParallelExecutor
+-------------------------------
+:doc_source: paddle.fluid.parallel_executor.ParallelExecutor
+
+
diff --git a/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst b/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fdafdf1514c76de3a5c2946cdaeb1ae608cd97e5
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/ParamAttr_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_ParamAttr:
+
+ParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.ParamAttr
+
+
diff --git a/doc/fluid/api_cn/framework_cn/Print_cn.rst b/doc/fluid/api_cn/framework_cn/Print_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a2be923d06a0d52d151f194ce7b07305e88bb3f2
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/Print_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_Print:
+
+Print
+-------------------------------
+:doc_source: paddle.fluid.layers.control_flow.Print
+
+
diff --git a/doc/fluid/api_cn/framework_cn/Program_cn.rst b/doc/fluid/api_cn/framework_cn/Program_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..74c50c89d8291c1da510d747f8c150bc96e309db
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/Program_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_Program:
+
+Program
+-------------------------------
+:doc_source: paddle.fluid.framework.Program
+
+
diff --git a/doc/fluid/api_cn/framework_cn/Variable_cn.rst b/doc/fluid/api_cn/framework_cn/Variable_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2f9e6e06072bc65c320a4d2beb5cb4f1c46bc6d3
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/Variable_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_Variable:
+
+Variable
+-------------------------------
+:doc_source: paddle.fluid.framework.Variable
+
+
diff --git a/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst b/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a1d4d7fe0c01b806f1fc059c5f6db2113b7d1391
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/WeightNormParamAttr_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_WeightNormParamAttr:
+
+WeightNormParamAttr
+-------------------------------
+:doc_source: paddle.fluid.param_attr.WeightNormParamAttr
+
+
diff --git a/doc/fluid/api_cn/framework_cn/append_backward_cn.rst b/doc/fluid/api_cn/framework_cn/append_backward_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cd799f1d60b8d86a05fc5ab3cf1a32c824b11485
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/append_backward_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_append_backward:
+
+append_backward
+-------------------------------
+:doc_source: paddle.fluid.backward.append_backward
+
+
diff --git a/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst b/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2524a5f56870cd1d398e363f3210f4eab9628a8b
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/create_global_var_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_create_global_var:
+
+create_global_var
+-------------------------------
+:doc_source: paddle.fluid.layers.tensor.create_global_var
+
+
diff --git a/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst b/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5bf4989e63ec1474c4e411415b0d8e1834740301
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/create_parameter_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_create_parameter:
+
+create_parameter
+-------------------------------
+:doc_source: paddle.fluid.layers.create_parameter
+
+
diff --git a/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst b/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..823b70c49cd2d91a3ce0e190293ec3d067635eb1
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/default_main_program_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_default_main_program:
+
+default_main_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_main_program
+
+
diff --git a/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst b/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..50dfc93c9e5b6ae2c33596567bed24991337a7ff
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/default_startup_program_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_default_startup_program:
+
+default_startup_program
+-------------------------------
+:doc_source: paddle.fluid.framework.default_startup_program
+
+
diff --git a/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst b/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..35d76080ff458cfaed76a32fc594c2dedd8a033d
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/get_cuda_rng_state_cn.rst
@@ -0,0 +1,24 @@
+.. _cn_api_paddle_framework_get_cuda_rng_state:
+
+get_cuda_rng_state
+-------------------------------
+
+.. py:function:: paddle.framework.get_cuda_rng_state()
+
+
+获取cuda随机数生成器的状态信息
+
+
+参数:
+
+ 无
+
+返回:
+    GeneratorState,cuda随机数生成器当前的状态信息对象。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ sts = paddle.get_cuda_rng_state()
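+    # sts 可在之后通过 paddle.set_cuda_rng_state(sts) 恢复随机数生成器状态(参见 set_cuda_rng_state)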
diff --git a/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst b/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cedfe95fb87a30c2038e33484298affc4439c047
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/get_default_dtype_cn.rst
@@ -0,0 +1,23 @@
+.. _cn_api_paddle_framework_get_default_dtype:
+
+get_default_dtype
+-------------------------------
+
+.. py:function:: paddle.get_default_dtype()
+
+
+得到当前全局的dtype。 该值初始是float32。
+
+
+参数:
+
+ 无
+
+返回: string,这个全局dtype仅支持float16、float32、float64
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ paddle.get_default_dtype()
diff --git a/doc/fluid/api_cn/framework_cn/global_scope_cn.rst b/doc/fluid/api_cn/framework_cn/global_scope_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fa8317ccd449bfcacbfc399ad2b6139607ff3ae9
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/global_scope_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_global_scope:
+
+global_scope
+-------------------------------
+:doc_source: paddle.fluid.executor.global_scope
+
+
diff --git a/doc/fluid/api_cn/framework_cn/gradients_cn.rst b/doc/fluid/api_cn/framework_cn/gradients_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4303d5753a28bac6c62136cfd29d1d5a0decffc5
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/gradients_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_gradients:
+
+gradients
+-------------------------------
+:doc_source: paddle.fluid.backward.gradients
+
+
diff --git a/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst b/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7ddf88f632df1a1c4fd6aea961b9cf30d75c682c
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/manual_seed_cn.rst
@@ -0,0 +1,24 @@
+.. _cn_api_paddle_framework_manual_seed:
+
+manual_seed
+-------------------------------
+
+.. py:function:: paddle.framework.manual_seed(seed)
+
+
+设置全局默认generator的随机种子。
+
+
+参数:
+
+    - **seed** (int) - 要设置的随机种子,推荐使用较大的整数。
+
+返回:
+ Generator:全局默认generator对象。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ paddle.manual_seed(102)
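+    # 之后产生的随机数将基于该种子生成,便于复现实验结果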
diff --git a/doc/fluid/api_cn/framework_cn/name_scope_cn.rst b/doc/fluid/api_cn/framework_cn/name_scope_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fa537fa5f86529e46de1a9d8c6aa318fe4a0acc0
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/name_scope_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_name_scope:
+
+name_scope
+-------------------------------
+:doc_source: paddle.fluid.framework.name_scope
+
+
diff --git a/doc/fluid/api_cn/framework_cn/program_guard_cn.rst b/doc/fluid/api_cn/framework_cn/program_guard_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a472d59f9d11e9ae00171c7cbe631d56e4ffdcca
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/program_guard_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_program_guard:
+
+program_guard
+-------------------------------
+:doc_source: paddle.fluid.framework.program_guard
+
+
diff --git a/doc/fluid/api_cn/framework_cn/py_func_cn.rst b/doc/fluid/api_cn/framework_cn/py_func_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..369601410f3f6bbba7948aca2c65dc17f2a27e34
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/py_func_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_py_func:
+
+py_func
+-------------------------------
+:doc_source: paddle.fluid.layers.nn.py_func
+
+
diff --git a/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst b/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..570e2aef328233a268931a141c04c0d540924e0f
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/scope_guard_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_framework_cn_scope_guard:
+
+scope_guard
+-------------------------------
+:doc_source: paddle.fluid.executor.scope_guard
+
+
diff --git a/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst b/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e63225b81eba139c52a9fdb2fecc54ab6ede2e93
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/set_cuda_rng_state_cn.rst
@@ -0,0 +1,25 @@
+.. _cn_api_paddle_framework_set_cuda_rng_state:
+
+set_cuda_rng_state
+-------------------------------
+
+.. py:function:: paddle.framework.set_cuda_rng_state(state_list)
+
+
+设置cuda随机数生成器的状态信息
+
+
+参数:
+
+ - **state_list** (list [GeneratorState]) - 需要设置的随机数生成器状态信息列表,通过get_cuda_rng_state()获取。
+
+返回:
+ 无
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ sts = paddle.get_cuda_rng_state()
+ paddle.set_cuda_rng_state(sts)
diff --git a/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst b/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9c6c6fe948acb39b526261c9ef92b8eaf4b59858
--- /dev/null
+++ b/doc/fluid/api_cn/framework_cn/set_default_dtype_cn.rst
@@ -0,0 +1,23 @@
+.. _cn_api_paddle_framework_set_default_dtype:
+
+set_default_dtype
+-------------------------------
+
+.. py:function:: paddle.set_default_dtype(d)
+
+
+设置默认的全局dtype。 默认的全局dtype最初是float32。
+
+
+参数:
+
+ - **d** (string|np.dtype) - 设为默认值的dtype。 它仅支持float16,float32和float64。
+
+返回: 无
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle
+ paddle.set_default_dtype("float32")
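+    # 此后新创建的Tensor默认使用float32;可通过 paddle.get_default_dtype() 查询当前设置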
diff --git a/doc/fluid/api_cn/gen_index.py b/doc/fluid/api_cn/gen_index.py
index 35c33d0bfb128ac92ec0c77f93cc4433facb1cb2..4c97004d53137e4c5faa6dd58d5777e9e5106d64 100644
--- a/doc/fluid/api_cn/gen_index.py
+++ b/doc/fluid/api_cn/gen_index.py
@@ -14,8 +14,20 @@ API Reference
''')
file_object.write(' ../api_guides/index_cn.rst'+'\n')
+
+ file_names = []
+ file_names = glob.glob("*.rst")
+
+ for file_name in sorted(file_names):
+ with open(file_name, 'r')as f:
+ for i in range(2):
+ line = f.readline().strip()
+ if line.find('paddle.') != -1:
+ file_object.write(' '+file_name + "\n")
+ file_names.remove(file_name)
+
file_object.write(' fluid_cn.rst'+'\n')
- for file_name in sorted(glob.glob("*.rst")):
+ for file_name in sorted(file_names):
if file_name != 'index.rst' and file_name != 'index_cn.rst' and file_name != 'fluid_cn.rst':
file_object.write(' '+file_name + "\n")
file_object.close( )
diff --git a/doc/fluid/api_cn/imperative_cn.rst b/doc/fluid/api_cn/imperative_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0f99f1c8f7decf504eb675c70b8a81c2715cf6db
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn.rst
@@ -0,0 +1,31 @@
+=======================
+paddle.imperative
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ imperative_cn/CosineDecay_cn.rst
+ imperative_cn/DataParallel_cn.rst
+ imperative_cn/declarative_cn.rst
+ imperative_cn/enabled_cn.rst
+ imperative_cn/ExponentialDecay_cn.rst
+ imperative_cn/grad_cn.rst
+ imperative_cn/guard_cn.rst
+ imperative_cn/InverseTimeDecay_cn.rst
+ imperative_cn/jit_cn.rst
+ imperative_cn/load_cn.rst
+ imperative_cn/load_dygraph_cn.rst
+ imperative_cn/NaturalExpDecay_cn.rst
+ imperative_cn/no_grad_cn.rst
+ imperative_cn/NoamDecay_cn.rst
+ imperative_cn/ParallelEnv_cn.rst
+ imperative_cn/PiecewiseDecay_cn.rst
+ imperative_cn/PolynomialDecay_cn.rst
+ imperative_cn/prepare_context_cn.rst
+ imperative_cn/ProgramTranslator_cn.rst
+ imperative_cn/save_cn.rst
+ imperative_cn/save_dygraph_cn.rst
+ imperative_cn/to_variable_cn.rst
+ imperative_cn/TracedLayer_cn.rst
+ imperative_cn/TranslatedLayer_cn.rst
diff --git a/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bbfb9d267d5a95ecba6fa1448539e5147e151a50
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/CosineDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_CosineDecay:
+
+CosineDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst b/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6fc3900aa741404a9efc384ea511ebd60db81576
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/DataParallel_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_DataParallel:
+
+DataParallel
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.DataParallel
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6caba0a407430f2696ce40c267de8b5c3cf314d8
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/ExponentialDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_ExponentialDecay:
+
+ExponentialDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c08fb8be19228648219de9d346acc3b817febff8
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/InverseTimeDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_InverseTimeDecay:
+
+InverseTimeDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7c1cae72f52f35651cf38b6bcf7ff67daf85f696
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/NaturalExpDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_NaturalExpDecay:
+
+NaturalExpDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..12a4c8a26d6dbd86447b83b110e83644670d37f7
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/NoamDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_NoamDecay:
+
+NoamDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst b/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a6e6717d78b350fd81e8b1defa50f48c7ae8c05e
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/ParallelEnv_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_ParallelEnv:
+
+ParallelEnv
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.ParallelEnv
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb3ace6068630a572bc711c9331007b762ccc503
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/PiecewiseDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_PiecewiseDecay:
+
+PiecewiseDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst b/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9d2ab31fcff95c385f5b1556006c657f7704ed51
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/PolynomialDecay_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_PolynomialDecay:
+
+PolynomialDecay
+-------------------------------
+:doc_source: paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst b/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3ff078a775b7a42c47078e5d7bc7c1b41fcb1a02
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/ProgramTranslator_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_ProgramTranslator:
+
+ProgramTranslator
+-------------------------------
+:doc_source: paddle.fluid.dygraph.ProgramTranslator
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst b/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..de2697d6d9f068cb1870de9f23f4d8b502e2568d
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/TracedLayer_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_TracedLayer:
+
+TracedLayer
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.TracedLayer
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst b/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..546094acf65c4fb30341d60ea157576601ae8766
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/TranslatedLayer_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_TranslatedLayer:
+
+TranslatedLayer
+-------------------------------
+:doc_source: paddle.fluid.dygraph.io.TranslatedLayer
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/declarative_cn.rst b/doc/fluid/api_cn/imperative_cn/declarative_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a72beb42f9f20921e44f27fba6e799a8091575fe
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/declarative_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_declarative:
+
+declarative
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.declarative
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/enabled_cn.rst b/doc/fluid/api_cn/imperative_cn/enabled_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a127f528edd20149f51f9b0ba6954aa0a2bfe661
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/enabled_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_enabled:
+
+enabled
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.enabled
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/grad_cn.rst b/doc/fluid/api_cn/imperative_cn/grad_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e3d75a0ee5c372a1d9d483eab34bae932b333d48
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/grad_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_grad:
+
+grad
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.grad
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/guard_cn.rst b/doc/fluid/api_cn/imperative_cn/guard_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..93db3250064b355d2f9f712202b11ffc6bffe518
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/guard_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_guard:
+
+guard
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.guard
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..baa8019a5285b3dd667fec3921097c54e37b45ee
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn.rst
@@ -0,0 +1,12 @@
+===
+jit
+===
+
+.. toctree::
+ :maxdepth: 1
+
+ jit_cn/save_cn.rst
+ jit_cn/set_code_level_cn.rst
+    jit_cn/set_verbosity_cn.rst
+ jit_cn/load_cn.rst
+ jit_cn/SaveLoadConfig_cn.rst
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..154e987bdb3ed8d3d86858b13b797b897b8eed62
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn/SaveLoadConfig_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_imperative_jit_SaveLoadConfig:
+
+SaveLoadConfig
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.SaveLoadConfig
\ No newline at end of file
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a326fa58f1bf3634a498113bccac46627df0d8e1
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn/load_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_imperative_jit_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.load
\ No newline at end of file
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0c36588fa37794a81db4c534e2f54ad8aaddd66f
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn/save_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_imperative_jit_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.save
\ No newline at end of file
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e57014212735b9c3e7610ad9221e96439960f90b
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn/set_code_level_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_imperative_jit_set_code_level:
+
+set_code_level
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.set_code_level
\ No newline at end of file
diff --git a/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst b/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a20b6c67dd4472da83ecabcbaad519cabce0f3c9
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/jit_cn/set_verbosity_cn.rst
@@ -0,0 +1,5 @@
+.. _cn_api_imperative_jit_set_verbosity:
+
+set_verbosity
+-------------------------------
+:doc_source: paddle.fluid.dygraph.jit.set_verbosity
\ No newline at end of file
diff --git a/doc/fluid/api_cn/imperative_cn/load_cn.rst b/doc/fluid/api_cn/imperative_cn/load_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c078604d2585703458260c82ee8dd9a2f2f5a65
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/load_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_load:
+
+load
+-------------------------------
+:doc_source: paddle.fluid.dygraph.checkpoint.load_dygraph
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst b/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..989ffa9f24f2ddac08a2365b71fd9929e11d2728
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/no_grad_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_no_grad:
+
+no_grad
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.no_grad
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst b/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..27a531db0286e3803478ecc734a0b565261882d4
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/prepare_context_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_prepare_context:
+
+prepare_context
+-------------------------------
+:doc_source: paddle.fluid.dygraph.parallel.prepare_context
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/save_cn.rst b/doc/fluid/api_cn/imperative_cn/save_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d515e9c076e610ff296d4e84469f37e1f738d83e
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/save_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_save:
+
+save
+-------------------------------
+:doc_source: paddle.fluid.dygraph.checkpoint.save_dygraph
+
+
diff --git a/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst b/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d2557bc0b630ce4e03d2d802d041313db3068365
--- /dev/null
+++ b/doc/fluid/api_cn/imperative_cn/to_variable_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_imperative_cn_to_variable:
+
+to_variable
+-------------------------------
+:doc_source: paddle.fluid.dygraph.base.to_variable
+
+
diff --git a/doc/fluid/api_cn/incubate_cn.rst b/doc/fluid/api_cn/incubate_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3174d2d82b96b5396e67feabf572ac595fbbaee6
--- /dev/null
+++ b/doc/fluid/api_cn/incubate_cn.rst
@@ -0,0 +1,11 @@
+=======================
+paddle.incubate
+=======================
+
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ incubate_cn/hapi_cn.rst
diff --git a/doc/fluid/api_cn/incubate_cn/hapi_cn.rst b/doc/fluid/api_cn/incubate_cn/hapi_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..983c10047db01ce2ad6accab9c6320b4ebdbb4fa
--- /dev/null
+++ b/doc/fluid/api_cn/incubate_cn/hapi_cn.rst
@@ -0,0 +1,12 @@
+=======================
+hapi
+=======================
+
+
+
+
+.. toctree::
+ :maxdepth: 1
+
+ hapi_cn/Model_cn.rst
+ hapi_cn/set_device_cn.rst
diff --git a/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst b/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4e147a58a436901c93e9363cf48e62ecc434a4b7
--- /dev/null
+++ b/doc/fluid/api_cn/incubate_cn/hapi_cn/Model_cn.rst
@@ -0,0 +1,530 @@
+.. _cn_api_paddle_incubate_hapi_model_Model:
+
+Model
+-------------------------------
+
+.. py:class:: paddle.incubate.hapi.model.Model()
+
+ ``Model`` 对象是一个具备训练、测试、推理的神经网络。该对象同时支持静态图和动态图模式,通过 ``fluid.enable_dygraph()`` 来切换。需要注意的是,该开关需要在实例化 ``Model`` 对象之前使用。 在静态图模式下,输入需要使用 ``hapi.Input`` 来定义。
+
+**代码示例**:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle
+ import paddle.fluid as fluid
+
+ from paddle.incubate.hapi.model import Model, Input, set_device
+ from paddle.incubate.hapi.loss import CrossEntropy
+ from paddle.incubate.hapi.datasets import MNIST
+ from paddle.incubate.hapi.metrics import Accuracy
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 10, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+ device = set_device('cpu')
+
+ # 切换成动态图模式,默认使用静态图模式
+ fluid.enable_dygraph(device)
+
+ model = MyModel()
+ optim = fluid.optimizer.SGD(learning_rate=1e-3,
+ parameter_list=model.parameters())
+
+ inputs = [Input([None, 784], 'float32', name='x')]
+ labels = [Input([None, 1], 'int64', name='label')]
+
+ mnist_data = MNIST(mode='train', chw_format=False)
+ model.prepare(optim,
+ CrossEntropy(average=True),
+ Accuracy(),
+ inputs,
+ labels,
+ device=device)
+ model.fit(mnist_data, epochs=2, batch_size=32, verbose=1)
+
+
+.. py:function:: train_batch(inputs, labels=None)
+
+在一个批次的数据上进行训练。
+
+参数:
+ - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray``。
+ - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。
+
+返回:一个列表,包含了训练损失函数的值,如果定义了评估函数,还会包含评估函数得到的指标。
+
+返回类型:list
+
+**代码示例**:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+
+ from paddle.fluid.dygraph import Linear
+ from paddle.incubate.hapi.loss import CrossEntropy
+ from paddle.incubate.hapi.model import Model, Input, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = Linear(784, 10, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ device = set_device('cpu')
+ fluid.enable_dygraph(device)
+
+ model = MyModel()
+ optim = fluid.optimizer.SGD(learning_rate=1e-3,
+ parameter_list=model.parameters())
+
+ inputs = [Input([None, 784], 'float32', name='x')]
+ labels = [Input([None, 1], 'int64', name='label')]
+ model.prepare(optim,
+ CrossEntropy(average=True),
+ inputs=inputs,
+ labels=labels,
+ device=device)
+ data = np.random.random(size=(4,784)).astype(np.float32)
+ label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
+ loss = model.train_batch([data], [label])
+ print(loss)
+
+.. py:function:: eval_batch(inputs, labels=None)
+
+在一个批次的数据上进行评估。
+
+参数:
+ - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。
+ - **labels** (list) - 1维列表,每个元素都是一批次的输入标签,数据类型为 ``numpy.ndarray`` 。默认值:None。
+
+返回:一个列表,包含了评估损失函数的值,如果定义了评估函数,还会包含评估函数得到的指标。
+
+返回类型:list
+
+**代码示例**:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+
+ from paddle.incubate.hapi.loss import CrossEntropy
+ from paddle.incubate.hapi.model import Model, Input, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 10, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ device = set_device('cpu')
+ fluid.enable_dygraph(device)
+
+ model = MyModel()
+ optim = fluid.optimizer.SGD(learning_rate=1e-3,
+ parameter_list=model.parameters())
+
+ inputs = [Input([None, 784], 'float32', name='x')]
+ labels = [Input([None, 1], 'int64', name='label')]
+ model.prepare(optim,
+ CrossEntropy(average=True),
+ inputs=inputs,
+ labels=labels,
+ device=device)
+ data = np.random.random(size=(4,784)).astype(np.float32)
+ label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
+ loss = model.eval_batch([data], [label])
+ print(loss)
+
+.. py:function:: test_batch(inputs)
+
+在一个批次的数据上进行测试。
+
+参数:
+ - **inputs** (list) - 1维列表,每个元素都是一批次的输入数据,数据类型为 ``numpy.ndarray`` 。
+
+返回:一个列表,包含了模型的输出。
+
+返回类型:list
+
+**代码示例**:
+
+.. code-block:: python
+
+ import numpy as np
+ import paddle.fluid as fluid
+ from paddle.incubate.hapi.model import Model, Input, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ device = set_device('cpu')
+ fluid.enable_dygraph(device)
+
+ model = MyModel()
+ inputs = [Input([None, 784], 'float32', name='x')]
+ model.prepare(inputs=inputs,
+ device=device)
+ data = np.random.random(size=(4,784)).astype(np.float32)
+    out = model.test_batch([data])
+ print(out)
+
+.. py:function:: save(path):
+
+将模型的参数和训练过程中优化器的信息保存到指定的路径。所有的模型参数都会保存到一个后缀为 ``.pdparams`` 的文件中。
+所有的优化器信息和相关参数,比如 ``Adam`` 优化器中的 ``beta1`` , ``beta2`` ,``momentum`` 等,都会被保存到后缀为 ``.pdopt``
+的文件中。
+
+参数:
+ - **path** (str) - 保存的文件名前缀。格式如 ``dirname/file_prefix`` 或者 ``file_prefix`` 。
+
+返回:None
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.incubate.hapi.model import Model, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ device = set_device('cpu')
+ fluid.enable_dygraph(device)
+ model = MyModel()
+ model.save('checkpoint/test')
+
+.. py:function:: load(path, skip_mismatch=False, reset_optimizer=False):
+
+从指定的文件中载入模型参数和优化器参数,如果不想恢复优化器参数信息,优化器信息文件可以不存在。
+
+参数:
+    - **path** (str) - 保存参数或优化器信息的文件前缀,对应文件如 ``path.pdparams`` 和 ``path.pdopt`` ;如果不想恢复优化器信息,后者对应的文件可以不存在。
+    - **skip_mismatch** (bool) - 是否跳过保存的模型文件中形状或名称不匹配的参数。设置为 ``False`` 时,遇到不匹配的参数会抛出一个错误。默认值:False。
+    - **reset_optimizer** (bool) - 设置为 ``True`` 时,会忽略提供的优化器信息文件;否则会载入提供的优化器信息。默认值:False。
+
+返回:None
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ from paddle.incubate.hapi.model import Model, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ device = set_device('cpu')
+ fluid.enable_dygraph(device)
+ model = MyModel()
+ model.load('checkpoint/test')
+
+.. py:function:: parameters(*args, **kwargs):
+
+返回一个包含模型所有参数的列表。
+
+返回:在静态图中返回一个包含 ``Parameter`` 的列表,在动态图中返回一个包含 ``ParamBase`` 的列表。
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+ from paddle.incubate.hapi.model import Model, Input, set_device
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(20, 10, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ fluid.enable_dygraph()
+ model = MyModel()
+ params = model.parameters()
+
+
+.. py:function:: prepare(optimizer=None, loss_function=None, metrics=None, inputs=None, labels=None, device=None):
+
+配置模型,指定训练和评估时所需的优化器、损失函数、评估指标、输入输出以及运行设备。
+
+参数:
+    - **optimizer** (Optimizer) - 训练模型时,该参数必须被设定;评估或测试时,该参数可以不设定。默认值:None。
+    - **loss_function** (Loss) - 训练模型时,该参数必须被设定。默认值:None。
+    - **metrics** (Metric|list[Metric]) - 当该参数被设定时,所有给定的评估方法会在训练和测试时被运行,并返回对应的指标。默认值:None。
+    - **inputs** (Input|list[Input]|dict) - 网络的输入,对于静态图,该参数必须给定。默认值:None。
+    - **labels** (Input|list[Input]|dict) - 标签,网络的输入。对于静态图,在训练和评估时该参数必须给定。默认值:None。
+    - **device** (str|fluid.CUDAPlace|fluid.CPUPlace|None) - 网络运行的设备,当不指定时,会根据环境和安装的 ``paddle`` 自动选择。默认值:None。
+
+返回:None
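+
+以下为 ``prepare`` 的一个最小示意用法(其中网络结构、输入形状均为示例中的假设,参照本页前述示例构造,仅用于说明调用方式):
+
+**代码示例**:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    from paddle.incubate.hapi.model import Model, Input, set_device
+    from paddle.incubate.hapi.loss import CrossEntropy
+    from paddle.incubate.hapi.metrics import Accuracy
+
+    class MyModel(Model):
+        def __init__(self):
+            super(MyModel, self).__init__()
+            self._fc = fluid.dygraph.Linear(784, 10, act='softmax')
+        def forward(self, x):
+            y = self._fc(x)
+            return y
+
+    device = set_device('cpu')
+    fluid.enable_dygraph(device)
+
+    model = MyModel()
+    optim = fluid.optimizer.SGD(learning_rate=1e-3,
+                                parameter_list=model.parameters())
+
+    # 静态图模式下必须通过 inputs/labels 指定输入;动态图模式下也可传入,便于两种模式共用同一份代码
+    inputs = [Input([None, 784], 'float32', name='x')]
+    labels = [Input([None, 1], 'int64', name='label')]
+
+    model.prepare(optim,
+                  CrossEntropy(average=True),
+                  Accuracy(),
+                  inputs=inputs,
+                  labels=labels,
+                  device=device)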
+
+.. py:function:: fit(train_data=None, eval_data=None, batch_size=1, epochs=1, eval_freq=1, log_freq=10, save_dir=None, save_freq=1, verbose=2, drop_last=False, shuffle=True, num_workers=0, callbacks=None):
+
+训练模型。当 ``eval_data`` 给定时,会在 ``eval_freq`` 个 ``epoch`` 后进行一次评估。
+
+参数:
+    - **train_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。默认值:None。
+    - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。当给定时,会按 ``eval_freq`` 的设置进行评估。默认值:None。
+    - **batch_size** (int) - 训练数据或评估数据的批大小,当 ``train_data`` 或 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。
+    - **epochs** (int) - 训练的轮数。默认值:1。
+    - **eval_freq** (int) - 评估的频率,多少个 ``epoch`` 评估一次。默认值:1。
+    - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:10。
+    - **save_dir** (str|None) - 保存模型的文件夹,如果不设定,将不保存模型。默认值:None。
+    - **save_freq** (int) - 保存模型的频率,多少个 ``epoch`` 保存一次模型。默认值:1。
+    - **verbose** (int) - 日志显示模式,必须为0、1或2。设定为0时不打印日志;设定为1时使用进度条的方式打印日志;设定为2时逐行打印日志。默认值:2。
+    - **drop_last** (bool) - 是否丢弃训练数据中最后一个样本数不足 ``batch_size`` 的不完整批次。默认值:False。
+    - **shuffle** (bool) - 是否对训练数据进行洗牌。当 ``train_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:True。
+    - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``train_data`` 和 ``eval_data`` 都为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。
+    - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。
+
+返回:None
+
+**代码示例**:
+
+.. code-block:: python
+
+ # 1. 使用Dataset训练,并设置batch_size的例子。
+ import paddle.fluid as fluid
+
+ from paddle.incubate.hapi.model import Model, Input, set_device
+ from paddle.incubate.hapi.loss import CrossEntropy
+ from paddle.incubate.hapi.metrics import Accuracy
+ from paddle.incubate.hapi.datasets import MNIST
+ from paddle.incubate.hapi.vision.models import LeNet
+
+ dynamic = True
+ device = set_device('cpu')
+ fluid.enable_dygraph(device) if dynamic else None
+
+ train_dataset = MNIST(mode='train')
+ val_dataset = MNIST(mode='test')
+
+ inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
+ labels = [Input([None, 1], 'int64', name='label')]
+
+ model = LeNet()
+ optim = fluid.optimizer.Adam(
+ learning_rate=0.001, parameter_list=model.parameters())
+ model.prepare(
+ optim,
+ CrossEntropy(),
+ Accuracy(topk=(1, 2)),
+ inputs=inputs,
+ labels=labels,
+ device=device)
+ model.fit(train_dataset,
+ val_dataset,
+ epochs=2,
+ batch_size=64,
+ save_dir='mnist_checkpoint')
+
+ # 2. 使用Dataloader训练的例子.
+
+ from paddle.incubate.hapi.model import Model, Input, set_device
+ from paddle.incubate.hapi.loss import CrossEntropy
+ from paddle.incubate.hapi.metrics import Accuracy
+ from paddle.incubate.hapi.datasets import MNIST
+ from paddle.incubate.hapi.vision.models import LeNet
+
+ dynamic = True
+ device = set_device('cpu')
+ fluid.enable_dygraph(device) if dynamic else None
+
+ train_dataset = MNIST(mode='train')
+ train_loader = fluid.io.DataLoader(train_dataset,
+ places=device, batch_size=64)
+ val_dataset = MNIST(mode='test')
+ val_loader = fluid.io.DataLoader(val_dataset,
+ places=device, batch_size=64)
+
+ inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
+ labels = [Input([None, 1], 'int64', name='label')]
+
+ model = LeNet()
+ optim = fluid.optimizer.Adam(
+ learning_rate=0.001, parameter_list=model.parameters())
+ model.prepare(
+ optim,
+ CrossEntropy(),
+ Accuracy(topk=(1, 2)),
+ inputs=inputs,
+ labels=labels,
+ device=device)
+ model.fit(train_loader,
+ val_loader,
+ epochs=2,
+ save_dir='mnist_checkpoint')
+
+
+.. py:function:: evaluate(eval_data, batch_size=1, log_freq=10, verbose=2, num_workers=0, callbacks=None):
+
+评估模型。
+
+参数:
+    - **eval_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。默认值:None。
+    - **batch_size** (int) - 评估数据的批大小,当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。
+    - **log_freq** (int) - 日志打印的频率,多少个 ``step`` 打印一次日志。默认值:10。
+    - **verbose** (int) - 日志显示模式,必须为0、1或2。设定为0时不打印日志;设定为1时使用进度条的方式打印日志;设定为2时逐行打印日志。默认值:2。
+    - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``eval_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。
+    - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。该参数不给定时,默认会插入 ``ProgBarLogger`` 和 ``ModelCheckpoint`` 这两个实例。默认值:None。
+
+返回:一个字典,包含了评估过程中计算得到的loss与评估指标的值。
+
+**代码示例**:
+
+.. code-block:: python
+
+ # declarative mode
+ import numpy as np
+ from paddle.incubate.hapi.metrics import Accuracy
+ from paddle.incubate.hapi.datasets import MNIST
+ from paddle.incubate.hapi.vision.transforms import Compose,Resize
+ from paddle.incubate.hapi.vision.models import LeNet
+ from paddle.incubate.hapi.model import Input, set_device
+
+
+ inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
+ labels = [Input([None, 1], 'int64', name='label')]
+
+ val_dataset = MNIST(mode='test')
+
+ model = LeNet()
+ model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels)
+
+ result = model.evaluate(val_dataset, batch_size=64)
+ print(result)
+
+ # imperative mode
+ import paddle.fluid.dygraph as dg
+ place = set_device('cpu')
+ with dg.guard(place) as g:
+ model = LeNet()
+ model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels)
+
+ result = model.evaluate(val_dataset, batch_size=64)
+ print(result)
+
+
+.. py:function:: predict(test_data, batch_size=1, num_workers=0, stack_outputs=False, callbacks=None):
+
+模型预测。
+
+参数:
+    - **test_data** (Dataset|DataLoader) - 一个可迭代的数据源,推荐给定一个 ``paddle.io.Dataset`` 或 ``paddle.io.DataLoader`` 的实例。默认值:None。
+    - **batch_size** (int) - 测试数据的批大小,当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:1。
+    - **num_workers** (int) - 启动子进程用于读取数据的数量。当 ``test_data`` 为 ``DataLoader`` 的实例时,该参数会被忽略。默认值:0。
+    - **stack_outputs** (bool) - 是否将输出进行堆叠。默认值:False。
+    - **callbacks** (Callback|list[Callback]|None) - ``Callback`` 的一个实例或实例列表。默认值:None。
+
+返回:一个列表,包含了模型在测试数据上的输出。
+
+**代码示例**:
+
+.. code-block:: python
+
+ # declarative mode
+ import numpy as np
+ from paddle.incubate.hapi.metrics import Accuracy
+ from paddle.incubate.hapi.datasets import MNIST
+ from paddle.incubate.hapi.vision.transforms import Compose,Resize
+ from paddle.incubate.hapi.vision.models import LeNet
+ from paddle.incubate.hapi.model import Input, set_device
+
+ class MnistDataset(MNIST):
+ def __init__(self, mode, return_label=True):
+ super(MnistDataset, self).__init__(mode=mode)
+ self.return_label = return_label
+
+ def __getitem__(self, idx):
+ img = np.reshape(self.images[idx], [1, 28, 28])
+ if self.return_label:
+ return img, np.array(self.labels[idx]).astype('int64')
+ return img,
+
+ def __len__(self):
+ return len(self.images)
+
+ inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
+
+ test_dataset = MnistDataset(mode='test', return_label=False)
+
+ model = LeNet()
+ model.prepare(inputs=inputs)
+
+ result = model.predict(test_dataset, batch_size=64)
+ print(result)
+
+ # imperative mode
+ import paddle.fluid.dygraph as dg
+ place = set_device('cpu')
+ with dg.guard(place) as g:
+ model = LeNet()
+ model.prepare(inputs=inputs)
+
+ result = model.predict(test_dataset, batch_size=64)
+ print(result)
+
+
+.. py:function:: save_inference_model(save_dir, model_filename=None, params_filename=None, model_only=False):
+
+将网络保存为用于预测部署的推理模型。
+
+参数:
+    - **save_dir** (str) - 保存推理模型的路径。
+    - **model_filename** (str,可选) - 保存预测模型结构 ``Inference Program`` 的文件名称。若设置为None,则使用 ``__model__`` 作为默认的文件名。默认值:None。
+    - **params_filename** (str,可选) - 保存预测模型所有相关参数的文件名称。若设置为None,则模型参数被保存在单独的文件中。默认值:None。
+    - **model_only** (bool,可选) - 若为True,则只保存预测模型的网络结构,而不保存预测模型的网络参数。默认值:False。
+
+返回:None
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+
+ from paddle.incubate.hapi.model import Model, Input
+
+ class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self._fc = fluid.dygraph.Linear(784, 1, act='softmax')
+ def forward(self, x):
+ y = self._fc(x)
+ return y
+
+ model = MyModel()
+ inputs = [Input([-1, 1, 784], 'float32', name='input')]
+ model.prepare(inputs=inputs)
+
+ model.save_inference_model('checkpoint/test')
\ No newline at end of file
diff --git a/doc/fluid/api_cn/index_cn.rst b/doc/fluid/api_cn/index_cn.rst
index 8599fb58c60845168918446808735b2cec68951d..f8e5363f412582fa92ab61661e3d992050d71961 100644
--- a/doc/fluid/api_cn/index_cn.rst
+++ b/doc/fluid/api_cn/index_cn.rst
@@ -2,15 +2,119 @@
API Reference
=============
+
+
+基础API
+-------
+
+飞桨2.0提供了新的API,可以同时支持声明式和命令式两种开发模式,比如paddle.nn.Linear,避免在两种模式下使用不同的API造成困惑。原飞桨1.x的API位于paddle.fluid目录下,其中部分组网类的API,只能用于声明式开发,比如:fluid.layers.fc,无法用于命令式开发。
+
+飞桨2.0对API的目录结构进行了调整,从原来的paddle.fluid目录调整到paddle目录下,使得开发接口更加清晰,调整后的目录结构如下:
+
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| 目录 | 功能和包含API |
++=====================+===========================================================================================================+
+| paddle.\* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor, paddle.framework目录下的所有API |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算elementwise\_add, 查找argmax等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.nn | 跟组网相关的API,比如:输入占位符data/Input,控制流while\_loop/cond,损失函数,卷积,LSTM等,激活函数等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.framework | 基础框架相关的API,比如:Variable, Program, Executor等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.imperative | imprerative模式专用的API,比如:to\_variable, prepare\_context等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.optimizer | 优化算法相关API,比如:SGD,Adagrad, Adam等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.metric | 评估指标计算相关的API,比如:accuracy, cos\_sim等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.device | 设备管理相关API,比如:CPUPlace, CUDAPlace等 |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+| paddle.fleet | 分布式相关API |
++---------------------+-----------------------------------------------------------------------------------------------------------+
+
+同时飞桨2.0对部分Paddle 1.x版本的API进行了清理,删除了部分不再推荐使用的API,具体信息请参考Release Note。
+
+
+高层API
+-------
+
+使用飞桨进行深度学习任务的开发,整体过程包括数据处理、组网、训练、评估、模型导出、预测部署这些基本的操作。这些基本操作在不同的任务中会反复出现,使用基础API进行开发时,需要开发者重复地写这些基础操作的代码,增加了模型开发的工作量。高层API针对这些基础操作进行了封装,提供更高层的开发接口,开发者只需要关心数据处理和自定义组网,其他工作可以通过调用高层API来完成。在MNIST手写数字识别任务中,对比动态图基础API的实现方式,通过使用高层API可以减少80%的非组网类代码。
+
+使用高层API的另外一个好处是,可以通过一行代码\ ``paddle.enable_imperative``\ 切换命令式编程模式和声明式编程模式。在开发阶段,可以使用命令式编程模式,方便调试;开发完成后,可以切换到声明式编程模式,加速训练并方便部署。这样既兼具了命令式编程实时执行、容易调试的优点,又获得了声明式编程全局优化和容易部署的优点。
+
+以下为高层API的一个基础示例:
+
+.. code:: python
+
+ import numpy as np
+ import paddle
+ import paddle.nn.functional as F
+ from paddle.incubate.hapi.model import Model, Input, Loss
+ from paddle.incubate.hapi.loss import CrossEntropy
+
+ #高层API的组网方式需要继承Model,Model类实现了模型执行所需的逻辑
+ class SimpleNet(Model):
+ def __init__(self, in_size, out_size):
+ super(SimpleNet, self).__init__()
+ self._linear = paddle.nn.Linear(in_size, out_size)
+ def forward(self, x):
+ y = self._linear(x)
+ z = self._linear(y)
+ pred = F.softmax(z)
+ return pred
+
+ #兼容声明式开发模式,定义数据形状类型,如果不使用声明式编程模式,可以不定义数据占位符
+ inputs = [Input([None, 8], 'float32', name='image')]
+ labels = [Input([None, 1], 'int64', name='labels')]
+
+    #切换执行模式(按照前文Model的说明,需在实例化Model之前调用)
+    paddle.enable_imperative(paddle.CPUPlace())
+
+    #定义模型网络结构,包括指定损失函数和优化算法
+    model = SimpleNet(8, 8)
+    optimizer = paddle.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=model.parameters())
+    model.prepare(optimizer, CrossEntropy(), None, inputs, labels, device='cpu')
+
+ #基于batch的训练
+ batch_num = 10
+ x = np.random.random((4, 8)).astype('float32')
+ y = np.random.randint(0, 8, (4, 1)).astype('int64')
+ for i in range(batch_num):
+ model.train_batch(inputs=x, labels=y)
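+
+    # 示意:训练完成后可用 model.save 保存参数(保存路径仅为示例)
+    model.save('simple_net_checkpoint/test')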
+
+更多高层API开发的模型和示例请参考github Repo:
+`hapi `__
+
+
.. toctree::
:maxdepth: 1
../api_guides/index_cn.rst
+ paddle_cn.rst
+ dataset_cn.rst
+ tensor_cn.rst
+ nn_cn.rst
+ imperative_cn.rst
+ declarative_cn.rst
+ optimizer_cn.rst
+ static_cn.rst
+ metric_cn.rst
+ framework_cn.rst
+ io_cn.rst
+ utils_cn.rst
+ incubate_cn.rst
fluid_cn.rst
- api_tree_cn.rst
backward_cn.rst
clip_cn.rst
+ data_cn/data_reader_cn.rst
+ data_cn/dataset_cn.rst
dataset_cn.rst
+ distributed_cn.rst
dygraph_cn.rst
executor_cn.rst
initializer_cn.rst
@@ -18,8 +122,8 @@ API Reference
layers_cn.rst
metrics_cn.rst
nets_cn.rst
- optimizer_cn.rst
profiler_cn.rst
regularizer_cn.rst
transpiler_cn.rst
unique_name_cn.rst
+ static_cn.rst
diff --git a/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst
index 4e3031e4f0a8d4d4ac5879f450289fa3d600168d..58f1dc08bb7a89a7fc4f43342adef144b50e0ba9 100644
--- a/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/BilinearInitializer_cn.rst
@@ -1,44 +1,47 @@
-.. _cn_api_fluid_initializer_BilinearInitializer:
-
-BilinearInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.BilinearInitializer())
-
+.. _cn_api_fluid_initializer_BilinearInitializer:
+
+BilinearInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.BilinearInitializer())
+
+
+
+
该接口为参数初始化函数,用于转置卷积函数中,对输入进行上采样。用户通过任意整型因子放大shape为(B,C,H,W)的特征图。
-
+
返回:对象
用法如下:
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
import math
- factor = 2
- C = 2
- H = W = 32
- w_attr = fluid.ParamAttr(
- learning_rate=0.,
- regularizer=fluid.regularizer.L2Decay(0.),
- initializer=fluid.initializer.BilinearInitializer())
- x = fluid.layers.data(name="data", shape=[4, H, W],
- dtype="float32")
- conv_up = fluid.layers.conv2d_transpose(
- input=x,
- num_filters=C,
- output_size=None,
- filter_size=2 * factor - factor % 2,
- padding=int(math.ceil((factor - 1) / 2.)),
- stride=factor,
- groups=C,
- param_attr=w_attr,
- bias_attr=False)
-
-上述代码实现的是将输入x(shape=[-1, 4, H, W])经过转置卷积得到shape=[-1, C, H*factor, W*factor]的输出,num_filters = C和groups = C 表示这是按通道转置的卷积函数,输出通道为C,转置卷积的groups为C。滤波器shape为(C,1,K,K),K为filter_size。该初始化函数为滤波器的每个通道设置(K,K)插值核。输出特征图的最终输出shape为(B,C,factor*H,factor*W)。注意学习率和权重衰减设为0,以便在训练过程中双线性插值的系数值保持不变
-
-
-
-
+ factor = 2
+ C = 2
+ H = W = 32
+ w_attr = fluid.ParamAttr(
+ learning_rate=0.,
+ regularizer=fluid.regularizer.L2Decay(0.),
+ initializer=fluid.initializer.BilinearInitializer())
+ x = fluid.layers.data(name="data", shape=[4, H, W],
+ dtype="float32")
+ conv_up = fluid.layers.conv2d_transpose(
+ input=x,
+ num_filters=C,
+ output_size=None,
+ filter_size=2 * factor - factor % 2,
+ padding=int(math.ceil((factor - 1) / 2.)),
+ stride=factor,
+ groups=C,
+ param_attr=w_attr,
+ bias_attr=False)
+
+上述代码实现的是将输入x(shape=[-1, 4, H, W])经过转置卷积得到shape=[-1, C, H*factor, W*factor]的输出,num_filters = C和groups = C 表示这是按通道转置的卷积函数,输出通道为C,转置卷积的groups为C。滤波器shape为(C,1,K,K),K为filter_size。该初始化函数为滤波器的每个通道设置(K,K)插值核。输出特征图的最终输出shape为(B,C,factor*H,factor*W)。注意学习率和权重衰减设为0,以便在训练过程中双线性插值的系数值保持不变
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst b/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst
index da9b802f33a88ce642633663c7a0c93abc9a4e7a..78664ec87a1a3daa53bc50f94bd0ab832b8b466e 100644
--- a/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/Bilinear_cn.rst
@@ -1,10 +1,16 @@
-.. _cn_api_fluid_initializer_Bilinear:
-
-Bilinear
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.Bilinear
-
-``BilinearInitializer`` 的别名
-
-
+.. _cn_api_fluid_initializer_Bilinear:
+
+Bilinear
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.Bilinear
+
+:alias_main: paddle.nn.initializer.Bilinear
+:alias: paddle.nn.initializer.Bilinear
+:old_api: paddle.fluid.initializer.Bilinear
+
+
+
+``BilinearInitializer`` 的别名
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst
index a73fab89ddd7f82ce1d203fdcf5fdbf01b0784ac..f5acebd8919da242f5eac12e8349a7d1c606edfa 100644
--- a/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/ConstantInitializer_cn.rst
@@ -5,6 +5,9 @@ ConstantInitializer
.. py:class:: paddle.fluid.initializer.ConstantInitializer(value=0.0, force_cpu=False)
+
+
+
该接口为常量初始化函数,用于权重初始化,通过输入的value值初始化输入变量;
参数:
diff --git a/doc/fluid/api_cn/initializer_cn/Constant_cn.rst b/doc/fluid/api_cn/initializer_cn/Constant_cn.rst
index 5ffd7cc8858f399e14c7a68d06e9d9204c6b66e8..fdc567bd1b7ee4c9ddae0e13c018b54e0f2fe7e1 100644
--- a/doc/fluid/api_cn/initializer_cn/Constant_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/Constant_cn.rst
@@ -1,10 +1,16 @@
-.. _cn_api_fluid_initializer_Constant:
-
-Constant
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.Constant
-
-``ConstantInitializer`` 的别名
-
-
+.. _cn_api_fluid_initializer_Constant:
+
+Constant
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.Constant
+
+:alias_main: paddle.nn.initializer.Constant
+:alias: paddle.nn.initializer.Constant
+:old_api: paddle.fluid.initializer.Constant
+
+
+
+``ConstantInitializer`` 的别名
+
+
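+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # Constant 即 ConstantInitializer 的别名,这里用常量 2.0 初始化全连接层权重
+    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.Constant(value=2.0))
+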
diff --git a/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst
index d87e53642fc0ac22ab7141f20793cd31a40b1363..ac42f93ff6bf15bb95d7a1d6db68ba96705d8eca 100644
--- a/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/MSRAInitializer_cn.rst
@@ -1,46 +1,49 @@
-.. _cn_api_fluid_initializer_MSRAInitializer:
-
-MSRAInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0)
-
-该接口实现MSRA方式的权重初始化(a.k.a. Kaiming初始化)
-
-该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文: `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。
-可以选择使用均匀分布或者正太分布初始化权重;
-在均匀分布中,范围为[-x,x],其中:
-
-.. math::
-
- x = \sqrt{\frac{6.0}{fan\_in}}
-
-在正态分布中,均值为0,标准差为:
-
-.. math::
-
- \sqrt{\frac{2.0}{fan\_in}}
-
-参数:
- - **uniform** (bool) - 为True表示使用均匀分布,为False表示使用正态分布
- - **fan_in** (float16|float32) - MSRAInitializer的fan_in。如果为None,fan_in沿伸自变量,多设置为None
- - **seed** (int32) - 随机种子
+.. _cn_api_fluid_initializer_MSRAInitializer:
+
+MSRAInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0)
+
+
+
+
+该接口实现MSRA方式的权重初始化(a.k.a. Kaiming初始化)
+
+该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文: `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification <https://arxiv.org/abs/1502.01852>`_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。
+可以选择使用均匀分布或者正态分布初始化权重;
+在均匀分布中,范围为[-x,x],其中:
+
+.. math::
+
+ x = \sqrt{\frac{6.0}{fan\_in}}
+
+在正态分布中,均值为0,标准差为:
+
+.. math::
+
+ \sqrt{\frac{2.0}{fan\_in}}
+
+参数:
+ - **uniform** (bool) - 为True表示使用均匀分布,为False表示使用正态分布
+ - **fan_in** (float16|float32) - MSRAInitializer的fan_in。如果为None,则从输入变量中推断该值。通常建议设置为None
+ - **seed** (int32) - 随机种子
返回:对象
-
-.. note::
-
- 在大多数情况下推荐设置fan_in为None
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
- fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.MSRAInitializer(uniform=False))
-
-
-
-
-
+
+.. note::
+
+ 在大多数情况下推荐设置fan_in为None
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+ fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.MSRAInitializer(uniform=False))
+
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst b/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst
index b795ebc826c9a98799a7c699f5e630ffbbded7da..00ff2f29bc574fe3aa24d401fb05783eb23b01c1 100644
--- a/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/MSRA_cn.rst
@@ -1,9 +1,15 @@
-.. _cn_api_fluid_initializer_MSRA:
-
-MSRA
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.MSRA
-
-``MSRAInitializer`` 的别名
-
+.. _cn_api_fluid_initializer_MSRA:
+
+MSRA
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.MSRA
+
+:alias_main: paddle.nn.initializer.MSRA
+:alias: paddle.nn.initializer.MSRA
+:old_api: paddle.fluid.initializer.MSRA
+
+
+
+``MSRAInitializer`` 的别名
+
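+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # MSRA 即 MSRAInitializer 的别名,uniform=False 表示使用正态分布形式
+    x = fluid.layers.data(name='x', shape=[32, 32], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.MSRA(uniform=False))
+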
diff --git a/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst
index 3a8972cc8c89cb171f71bb617bc72feff91617ad..3574b444b4cbe3684c6f9210892655a1b18b8c3c 100644
--- a/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/NormalInitializer_cn.rst
@@ -1,25 +1,28 @@
-.. _cn_api_fluid_initializer_NormalInitializer:
-
-NormalInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0)
-
-随机正态(高斯)分布初始化函数
-
-参数:
- - **loc** (float16|float32) - 正态分布的平均值
- - **scale** (float16|float32) - 正态分布的标准差
- - **seed** (int32) - 随机种子
-
-返回:对象
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
- fc = fluid.layers.fc(input=x, size=10,
- param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
-
+.. _cn_api_fluid_initializer_NormalInitializer:
+
+NormalInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0)
+
+
+
+
+随机正态(高斯)分布初始化函数
+
+参数:
+ - **loc** (float16|float32) - 正态分布的平均值
+ - **scale** (float16|float32) - 正态分布的标准差
+ - **seed** (int32) - 随机种子
+
+返回:对象
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+ fc = fluid.layers.fc(input=x, size=10,
+ param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
+
diff --git a/doc/fluid/api_cn/initializer_cn/Normal_cn.rst b/doc/fluid/api_cn/initializer_cn/Normal_cn.rst
index db6f424a5c94987760923d25de1730303a71f86f..a7ae0fe063be50f8af93a4197f68c09157a17733 100644
--- a/doc/fluid/api_cn/initializer_cn/Normal_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/Normal_cn.rst
@@ -1,10 +1,16 @@
-.. _cn_api_fluid_initializer_Normal:
-
-Normal
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.Normal
-
-``NormalInitializer`` 的别名
-
-
+.. _cn_api_fluid_initializer_Normal:
+
+Normal
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.Normal
+
+:alias_main: paddle.nn.initializer.Normal
+:alias: paddle.nn.initializer.Normal
+:old_api: paddle.fluid.initializer.Normal
+
+
+
+``NormalInitializer`` 的别名
+
+
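+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # Normal 即 NormalInitializer 的别名,loc 为均值,scale 为标准差
+    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
+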
diff --git a/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst
index 1e495ad97ca5cf87d8bb126f4665231aad85f921..7c0365ccba17c21f28907047bb902e25fb0df43b 100644
--- a/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/NumpyArrayInitializer_cn.rst
@@ -1,26 +1,30 @@
-.. _cn_api_fluid_initializer_NumpyArrayInitializer:
-
-NumpyArrayInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.NumpyArrayInitializer(value)
-
-该OP使用Numpy型数组来初始化参数变量。
-
-参数:
- - **value** (numpy) - 用于初始化变量的一个Numpy型数组。
-
-返回:张量(Tensor)
-
-返回类型:变量(Variable)
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- x = fluid.layers.data(name="x", shape=[5], dtype='float32')
- fc = fluid.layers.fc(input=x, size=10,
- param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
-
-
+.. _cn_api_fluid_initializer_NumpyArrayInitializer:
+
+NumpyArrayInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.NumpyArrayInitializer(value)
+
+
+
+
+该OP使用Numpy型数组来初始化参数变量。
+
+参数:
+ - **value** (numpy) - 用于初始化变量的一个Numpy型数组。
+
+返回:张量(Tensor)
+
+返回类型:变量(Variable)
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy
+ x1 = fluid.data(name="x1", shape=[2, 1], dtype='float32')
+ fc = fluid.layers.fc(input=x1, size=10,
+ param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst
index 8bf4f08af663e4142e8ad8b41f259563f3409cc6..4378efd743426e1ea3453c2ad43d986aca2f50f1 100644
--- a/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/TruncatedNormalInitializer_cn.rst
@@ -1,32 +1,35 @@
-.. _cn_api_fluid_initializer_TruncatedNormalInitializer:
-
-TruncatedNormalInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.TruncatedNormalInitializer(loc=0.0, scale=1.0, seed=0)
-
-Random Truncated Normal(高斯)分布初始化函数
-
-参数:
- - **loc** (float16|float32) - 正态分布的平均值
- - **scale** (float16|float32) - 正态分布的标准差
- - **seed** (int32) - 随机种子
-
-返回:对象
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[1], dtype='float32')
- fc = fluid.layers.fc(input=x, size=10,
- param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
-
-
-
-
-
-
-
-
+.. _cn_api_fluid_initializer_TruncatedNormalInitializer:
+
+TruncatedNormalInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.TruncatedNormalInitializer(loc=0.0, scale=1.0, seed=0)
+
+
+
+
+随机截断正态(高斯)分布初始化函数
+
+参数:
+ - **loc** (float16|float32) - 正态分布的平均值
+ - **scale** (float16|float32) - 正态分布的标准差
+ - **seed** (int32) - 随机种子
+
+返回:对象
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+ fc = fluid.layers.fc(input=x, size=10,
+ param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
+
+
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst b/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst
index 4cec1290655b963b764e88f19034da0f657815d6..d36b1c53f17417cc2f06b6641a16ed4de7a0b6f7 100644
--- a/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/TruncatedNormal_cn.rst
@@ -1,10 +1,16 @@
-.. _cn_api_fluid_initializer_TruncatedNormal:
-
-TruncatedNormal
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.TruncatedNormal
-
-``TruncatedNormalInitializer`` 的别名
-
-
+.. _cn_api_fluid_initializer_TruncatedNormal:
+
+TruncatedNormal
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.TruncatedNormal
+
+:alias_main: paddle.nn.initializer.TruncatedNormal
+:alias: paddle.nn.initializer.TruncatedNormal
+:old_api: paddle.fluid.initializer.TruncatedNormal
+
+
+
+``TruncatedNormalInitializer`` 的别名
+
+
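+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # TruncatedNormal 即 TruncatedNormalInitializer 的别名
+    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
+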
diff --git a/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst
index ec6e624973328ec1a0d5bbf09ba47e25e034e338..873451e6710b00a7cc7394634f4f7842bbca5ab6 100644
--- a/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/UniformInitializer_cn.rst
@@ -1,32 +1,35 @@
-.. _cn_api_fluid_initializer_UniformInitializer:
-
-UniformInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0)
-
-随机均匀分布初始化器
-
-参数:
- - **low** (float16|float32) - 下界
- - **high** (float16|float32) - 上界
- - **seed** (int32) - 随机种子
-
-返回:对象
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[1], dtype='float32')
- fc = fluid.layers.fc(input=x, size=10,
- param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
-
-
-
-
-
-
-
-
+.. _cn_api_fluid_initializer_UniformInitializer:
+
+UniformInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0)
+
+
+
+
+随机均匀分布初始化器
+
+参数:
+ - **low** (float16|float32) - 下界
+ - **high** (float16|float32) - 上界
+ - **seed** (int32) - 随机种子
+
+返回:对象
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+ fc = fluid.layers.fc(input=x, size=10,
+ param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
+
+
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst b/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst
index 81f0b1647741b268aae93b6d3b287eeabacde4ce..48a7efeeeddd34a99effca7c37e1c8dea99bf761 100644
--- a/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/Uniform_cn.rst
@@ -1,11 +1,17 @@
-.. _cn_api_fluid_initializer_Uniform:
-
-Uniform
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.Uniform
-
-``UniformInitializer`` 的别名
-
-
-
+.. _cn_api_fluid_initializer_Uniform:
+
+Uniform
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.Uniform
+
+:alias_main: paddle.nn.initializer.Uniform
+:alias: paddle.nn.initializer.Uniform
+:old_api: paddle.fluid.initializer.Uniform
+
+
+
+``UniformInitializer`` 的别名
+
+
+
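+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # Uniform 即 UniformInitializer 的别名,low/high 为均匀分布的下界和上界
+    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
+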
diff --git a/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst b/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst
index 4669437654a11f8a6c6b4824ed5c69dbd7757a41..b26098bf14107371c748445ad9493ba0f7331ffb 100644
--- a/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/XavierInitializer_cn.rst
@@ -1,48 +1,51 @@
-.. _cn_api_fluid_initializer_XavierInitializer:
-
-XavierInitializer
--------------------------------
-
-.. py:class:: paddle.fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0)
-
-该类实现Xavier权重初始化方法( Xavier weight initializer),Xavier权重初始化方法出自Xavier Glorot和Yoshua Bengio的论文 `Understanding the difficulty of training deep feedforward neural networks `_
-
-该初始化函数用于保持所有层的梯度尺度几乎一致。
-
-在均匀分布的情况下,取值范围为[-x,x],其中:
-
-.. math::
-
- x = \sqrt{\frac{6.0}{fan\_in+fan\_out}}
-
-正态分布的情况下,均值为0,标准差为:
-
-.. math::
-
- x = \sqrt{\frac{2.0}{fan\_in+fan\_out}}
-
-参数:
- - **uniform** (bool) - 是否用均匀分布,默认为True。如果为False,则使用正态分布。
- - **fan_in** (float) - 当前网络层的输入神经元个数。如果为None,则从变量中推断,默认为None。
- - **fan_out** (float) - 当前网络层的输出神经元个数。如果为None,则从变量中推断,默认为None。
- - **seed** (int) - 随机种子
-
-.. note::
-
- 在大多数情况下推荐将fan_in和fan_out设置为None
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- queries = fluid.layers.data(name='x', shape=[1], dtype='float32')
- fc = fluid.layers.fc(
- input=queries, size=10,
- param_attr=fluid.initializer.Xavier(uniform=False))
-
-
-
-
-
-
+.. _cn_api_fluid_initializer_XavierInitializer:
+
+XavierInitializer
+-------------------------------
+
+.. py:class:: paddle.fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0)
+
+
+
+
+该类实现Xavier权重初始化方法(Xavier weight initializer),Xavier权重初始化方法出自Xavier Glorot和Yoshua Bengio的论文 `Understanding the difficulty of training deep feedforward neural networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
+
+该初始化函数用于保持所有层的梯度尺度几乎一致。
+
+在均匀分布的情况下,取值范围为[-x,x],其中:
+
+.. math::
+
+ x = \sqrt{\frac{6.0}{fan\_in+fan\_out}}
+
+正态分布的情况下,均值为0,标准差为:
+
+.. math::
+
+ x = \sqrt{\frac{2.0}{fan\_in+fan\_out}}
+
+参数:
+ - **uniform** (bool) - 是否用均匀分布,默认为True。如果为False,则使用正态分布。
+ - **fan_in** (float) - 当前网络层的输入神经元个数。如果为None,则从变量中推断,默认为None。
+ - **fan_out** (float) - 当前网络层的输出神经元个数。如果为None,则从变量中推断,默认为None。
+ - **seed** (int) - 随机种子
+
+.. note::
+
+ 在大多数情况下推荐将fan_in和fan_out设置为None
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ queries = fluid.layers.data(name='x', shape=[1], dtype='float32')
+ fc = fluid.layers.fc(
+ input=queries, size=10,
+ param_attr=fluid.initializer.Xavier(uniform=False))
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst b/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst
index 69e632a7177a60d736c01a532e36e15ba5c55620..e19b2c7db8dcb80cadf0e7da5f3d10e39ba301d5 100644
--- a/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/Xavier_cn.rst
@@ -1,14 +1,20 @@
-.. _cn_api_fluid_initializer_Xavier:
-
-Xavier
--------------------------------
-
-.. py:attribute:: paddle.fluid.initializer.Xavier
-
-``XavierInitializer`` 的别名
-
-
-
-
-
-
+.. _cn_api_fluid_initializer_Xavier:
+
+Xavier
+-------------------------------
+
+.. py:attribute:: paddle.fluid.initializer.Xavier
+
+:alias_main: paddle.nn.initializer.Xavier
+:alias: paddle.nn.initializer.Xavier
+:old_api: paddle.fluid.initializer.Xavier
+
+
+
+``XavierInitializer`` 的别名
+
+
+
+
+
+
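+**代码示例**(补充示意,基于上述别名关系;fc 层与输入仅为演示):
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+    # Xavier 即 XavierInitializer 的别名,uniform=False 表示使用正态分布形式
+    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    fc = fluid.layers.fc(input=x, size=10,
+                         param_attr=fluid.initializer.Xavier(uniform=False))
+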
diff --git a/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst b/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst
index 7957a26a5a9de6b1e466a68a22f4a0c0db7696b5..8b3cf60ae1fe5aeb91431ad49fb71801f47b1c57 100644
--- a/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/force_init_on_cpu_cn.rst
@@ -1,31 +1,34 @@
-.. _cn_api_fluid_initializer_force_init_on_cpu:
-
-force_init_on_cpu
--------------------------------
-
-.. py:function:: paddle.fluid.initializer.force_init_on_cpu()
-
-该接口获得一个是否强制在CPU上初始化变量的布尔型标志位。
-
-返回:状态,是否应强制在CPU上强制进行变量初始化
-
-返回类型:bool
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- if fluid.initializer.force_init_on_cpu():
- step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32')
-
-
-
-
-
-
-
-
-
-
-
+.. _cn_api_fluid_initializer_force_init_on_cpu:
+
+force_init_on_cpu
+-------------------------------
+
+.. py:function:: paddle.fluid.initializer.force_init_on_cpu()
+
+
+
+
+该接口获得一个是否强制在CPU上初始化变量的布尔型标志位。
+
+返回:是否应强制在CPU上进行变量初始化的状态标志
+
+返回类型:bool
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ if fluid.initializer.force_init_on_cpu():
+ step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32')
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst b/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst
index 00e7b1d86e8d30ea4418d41f993a5002e886ad6a..db51f83b5dc08536236586f85b251bb92878df5c 100644
--- a/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst
+++ b/doc/fluid/api_cn/initializer_cn/init_on_cpu_cn.rst
@@ -1,21 +1,24 @@
-.. _cn_api_fluid_initializer_init_on_cpu:
-
-init_on_cpu
--------------------------------
-
-.. py:function:: paddle.fluid.initializer.init_on_cpu()
-
-该接口设置强制变量在 cpu 上初始化。
-
-返回:无
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- with fluid.initializer.init_on_cpu():
- step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32')
-
-
-
+.. _cn_api_fluid_initializer_init_on_cpu:
+
+init_on_cpu
+-------------------------------
+
+.. py:function:: paddle.fluid.initializer.init_on_cpu()
+
+
+
+
+该接口用于强制变量在 CPU 上初始化。
+
+返回:无
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ with fluid.initializer.init_on_cpu():
+ step = fluid.layers.create_global_var(shape=[2,3], value=1.0, dtype='float32')
+
+
+
diff --git a/doc/fluid/api_cn/io_cn.rst b/doc/fluid/api_cn/io_cn.rst
index 5115abb0da44ee9289895ece7f20854f70ebb0a1..1bee21a710d0366a1f7db206c18e377fc64bfe26 100644
--- a/doc/fluid/api_cn/io_cn.rst
+++ b/doc/fluid/api_cn/io_cn.rst
@@ -1,6 +1,6 @@
=======================
-fluid.io
+paddle.io
=======================
@@ -10,13 +10,18 @@ fluid.io
:maxdepth: 1
io_cn/batch_cn.rst
+ io_cn/BatchSampler_cn.rst
io_cn/buffered_cn.rst
io_cn/cache_cn.rst
io_cn/chain_cn.rst
io_cn/compose_cn.rst
io_cn/ComposeNotAligned_cn.rst
- io_cn/DataLoader_cn.rst
+ io_cn/DataLoader_cn.rst
+ io_cn/Dataset_cn.rst
io_cn/firstn_cn.rst
+ io_cn/get_program_parameter_cn.rst
+ io_cn/get_program_persistable_vars_cn.rst
io_cn/load_cn.rst
io_cn/load_inference_model_cn.rst
io_cn/load_params_cn.rst
@@ -34,4 +39,3 @@ fluid.io
io_cn/set_program_state_cn.rst
io_cn/shuffle_cn.rst
io_cn/xmap_readers_cn.rst
-
diff --git a/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst b/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d97e1af250b27c1b08959581b1a3981979a38d32
--- /dev/null
+++ b/doc/fluid/api_cn/io_cn/BatchSampler_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_io_cn_BatchSampler:
+
+BatchSampler
+-------------------------------
+:doc_source: paddle.fluid.dataloader.BatchSampler
+
+
diff --git a/doc/fluid/api_cn/io_cn/DataLoader_cn.rst b/doc/fluid/api_cn/io_cn/DataLoader_cn.rst
index aa01b837c7811089aab976819c6b2be178bf0d94..e8e1b68d3aadb0c9f5e3f958f4b096313e60c6e5 100644
--- a/doc/fluid/api_cn/io_cn/DataLoader_cn.rst
+++ b/doc/fluid/api_cn/io_cn/DataLoader_cn.rst
@@ -6,7 +6,13 @@ DataLoader
.. py:class:: paddle.fluid.io.DataLoader
-.. py:method:: from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False)
+
+
+
+.. py:method:: from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False, drop_last=True)
+
+.. note::
+ 框架保证DataLoader的数据加载顺序与用户提供的数据源读取顺序一致。
创建一个DataLoader对象用于加载Python生成器产生的数据。数据会由Python线程预先读取,并异步送入一个队列中。
@@ -26,12 +32,13 @@ DataLoader
- **iterable** (bool) - 所创建的DataLoader对象是否可迭代。
- **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。
- **use_multiprocess** (bool) - 设置是否是用多进程加速动态图的数据载入过程。注意:该参数的设置仅在动态图模式下有效, 在静态图模式下,该参数设置与否均无任何影响。默认值为False。
+ - **drop_last** (bool) - 是否丢弃最后的不足CPU/GPU设备数的批次。默认值为True。在网络训练时,用户不能设置drop_last=False,此时所有CPU/GPU设备均应从DataLoader中读取到数据。在网络预测时,用户可以设置drop_last=False,此时最后不足CPU/GPU设备数的批次可以进行预测。
返回: 被创建的DataLoader对象
返回类型: loader (DataLoader)
-**代码示例**
+**代码示例 1**
.. code-block:: python
@@ -165,6 +172,50 @@ DataLoader
assert relu.shape == [BATCH_SIZE, 784]
+**代码示例 2**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+ import os
+
+ # We use 2 CPU cores to run inference network
+ os.environ['CPU_NUM'] = '2'
+
+ # The data source has only 3 batches, which can not be
+ # divided evenly to each CPU core
+ def batch_generator():
+ for i in range(3):
+ yield np.array([i+1]).astype('float32'),
+
+ x = fluid.data(name='x', shape=[None], dtype='float32')
+ y = x * x
+
+ def run_inference(drop_last):
+ loader = fluid.io.DataLoader.from_generator(feed_list=[x],
+ capacity=8, drop_last=drop_last)
+ loader.set_batch_generator(batch_generator, fluid.cpu_places())
+
+ exe = fluid.Executor(fluid.CPUPlace())
+ prog = fluid.CompiledProgram(fluid.default_main_program())
+ prog = prog.with_data_parallel()
+
+ result = []
+ for data in loader():
+ each_ret, = exe.run(prog, feed=data, fetch_list=[y])
+ result.extend(each_ret)
+ return result
+
+ # Set drop_last to True, so that the last batch whose
+ # number is less than CPU core number would be discarded.
+ print(run_inference(drop_last=True)) # [1.0, 4.0]
+
+ # Set drop_last to False, so that the last batch whose
+ # number is less than CPU core number can be tested.
+ print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
+
+
.. py:method:: from_dataset(dataset, places, drop_last=True)
创建一个DataLoader对象用于加载Dataset产生的数据。目前,Dataset仅支持Linux系统下使用。
diff --git a/doc/fluid/api_cn/io_cn/Dataset_cn.rst b/doc/fluid/api_cn/io_cn/Dataset_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9cc4bf215405232188e577ad074d784ef2c06424
--- /dev/null
+++ b/doc/fluid/api_cn/io_cn/Dataset_cn.rst
@@ -0,0 +1,7 @@
+.. _cn_api_io_cn_Dataset:
+
+Dataset
+-------------------------------
+:doc_source: paddle.fluid.dataloader.Dataset
+
+
diff --git a/doc/fluid/api_cn/io_cn/PyReader_cn.rst b/doc/fluid/api_cn/io_cn/PyReader_cn.rst
index 9f9fb25f74fd6a6b7e792ec7ef12903dee00e80d..10920cb264f87170a5394d4a4be70a74596c6c02 100644
--- a/doc/fluid/api_cn/io_cn/PyReader_cn.rst
+++ b/doc/fluid/api_cn/io_cn/PyReader_cn.rst
@@ -1,386 +1,389 @@
-.. _cn_api_fluid_io_PyReader:
-
-PyReader
--------------------------------
-
-.. py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False)
-
-
-在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。
-
-参数:
- - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。
- - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。
- - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。
- - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。
- - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。
-
-
-返回: 被创建的reader对象
-
-返回类型: reader (Reader)
-
-
-**代码示例**
-
-1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 5
- BATCH_SIZE = 3
-
- def network(image, label):
- # 用户定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=image, size=10, act='softmax')
- return fluid.layers.cross_entropy(input=predict, label=label)
-
- def reader_creator_random_image_and_label(height, width):
- def reader():
- for i in range(ITER_NUM):
- fake_image = np.random.uniform(low=0,
- high=255,
- size=[height, width])
- fake_label = np.ones([1])
- yield fake_image, fake_label
- return reader
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-
- reader = fluid.io.PyReader(feed_list=[image, label],
- capacity=4,
- iterable=False)
-
- user_defined_reader = reader_creator_random_image_and_label(784, 784)
- reader.decorate_sample_list_generator(
- paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
-
- loss = network(image, label)
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
- for i in range(EPOCH_NUM):
- reader.start()
- while True:
- try:
- executor.run(feed=None)
- except fluid.core.EOFException:
- reader.reset()
- break
-
-
-2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 5
- BATCH_SIZE = 10
-
- def network(image, label):
- # 用户定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=image, size=10, act='softmax')
- return fluid.layers.cross_entropy(input=predict, label=label)
-
- def reader_creator_random_image(height, width):
- def reader():
- for i in range(ITER_NUM):
- fake_image = np.random.uniform(low=0, high=255, size=[height, width]),
- fake_label = np.ones([1])
- yield fake_image, fake_label
- return reader
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
-
- user_defined_reader = reader_creator_random_image(784, 784)
- reader.decorate_sample_list_generator(
- paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
- fluid.core.CPUPlace())
- loss = network(image, label)
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
-
- for _ in range(EPOCH_NUM):
- for data in reader():
- executor.run(feed=data, fetch_list=[loss])
-
-3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 5
- BATCH_SIZE = 10
-
- def reader_creator_random_image(height, width):
- def reader():
- for i in range(ITER_NUM):
- yield np.random.uniform(low=0, high=255, size=[height, width]), \
- np.random.random_integers(low=0, high=9, size=[1])
- return reader
-
- place = fluid.CPUPlace()
- with fluid.dygraph.guard(place):
- py_reader = fluid.io.PyReader(capacity=2, return_list=True)
- user_defined_reader = reader_creator_random_image(784, 784)
- py_reader.decorate_sample_list_generator(
- paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
- place)
- for image, label in py_reader():
- relu = fluid.layers.relu(image)
-
-.. py:method:: start()
-
-启动数据输入线程。只能在reader对象不可迭代时调用。
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- BATCH_SIZE = 10
-
- def generator():
- for i in range(5):
- yield np.random.uniform(low=0, high=255, size=[784, 784]),
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
- reader.decorate_sample_list_generator(
- paddle.batch(generator, batch_size=BATCH_SIZE))
-
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
- for i in range(3):
- reader.start()
- while True:
- try:
- executor.run(feed=None)
- except fluid.core.EOFException:
- reader.reset()
- break
-
-.. py:method:: reset()
-
-当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- BATCH_SIZE = 10
-
- def generator():
- for i in range(5):
- yield np.random.uniform(low=0, high=255, size=[784, 784]),
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
- reader.decorate_sample_list_generator(
- paddle.batch(generator, batch_size=BATCH_SIZE))
-
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
- for i in range(3):
- reader.start()
- while True:
- try:
- executor.run(feed=None)
- except fluid.core.EOFException:
- reader.reset()
- break
-
-.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None)
-
-设置PyReader对象的数据源。
-
-提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。
-
-当PyReader对象可迭代时,必须设置 ``places`` 。
-
-如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。
-
-参数:
- - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray)
- - **batch_size** (int) – batch size,必须大于0
- - **drop_last** (bool) – 当样本数小于batch数量时,是否删除最后一个batch
- - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 15
- BATCH_SIZE = 3
-
- def network(image, label):
- # 用户定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=image, size=10, act='softmax')
- return fluid.layers.cross_entropy(input=predict, label=label)
-
- def random_image_and_label_generator(height, width):
- def generator():
- for i in range(ITER_NUM):
- fake_image = np.random.uniform(low=0,
- high=255,
- size=[height, width])
- fake_label = np.array([1])
- yield fake_image, fake_label
- return generator
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
-
- user_defined_generator = random_image_and_label_generator(784, 784)
- reader.decorate_sample_generator(user_defined_generator,
- batch_size=BATCH_SIZE,
- places=[fluid.CPUPlace()])
- loss = network(image, label)
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
-
- for _ in range(EPOCH_NUM):
- for data in reader():
- executor.run(feed=data, fetch_list=[loss])
-
-.. py:method:: decorate_sample_list_generator(reader, places=None)
-
-设置PyReader对象的数据源。
-
-提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。
-
-当PyReader对象不可迭代时,必须设置 ``places`` 。
-
-参数:
- - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器
- - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 15
- BATCH_SIZE = 3
-
- def network(image, label):
- # 用户定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=image, size=10, act='softmax')
- return fluid.layers.cross_entropy(input=predict, label=label)
-
- def random_image_and_label_generator(height, width):
- def generator():
- for i in range(ITER_NUM):
- fake_image = np.random.uniform(low=0,
- high=255,
- size=[height, width])
- fake_label = np.ones([1])
- yield fake_image, fake_label
- return generator
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
-
- user_defined_generator = random_image_and_label_generator(784, 784)
- reader.decorate_sample_list_generator(
- paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
- fluid.core.CPUPlace())
- loss = network(image, label)
- executor = fluid.Executor(fluid.core.CPUPlace())
- executor.run(fluid.default_startup_program())
-
- for _ in range(EPOCH_NUM):
- for data in reader():
- executor.run(feed=data, fetch_list=[loss])
-
-.. py:method:: decorate_batch_generator(reader, places=None)
-
-设置PyReader对象的数据源。
-
-提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。
-
-当PyReader对象不可迭代时,必须设置 ``places`` 。
-
-参数:
- - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器
- - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import numpy as np
-
- EPOCH_NUM = 3
- ITER_NUM = 15
- BATCH_SIZE = 3
-
- def network(image, label):
- # 用户定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=image, size=10, act='softmax')
- return fluid.layers.cross_entropy(input=predict, label=label)
-
- def random_image_and_label_generator(height, width):
- def generator():
- for i in range(ITER_NUM):
- batch_image = np.random.uniform(low=0,
- high=255,
- size=[BATCH_SIZE, height, width])
- batch_label = np.ones([BATCH_SIZE, 1])
- batch_image = batch_image.astype('float32')
- batch_label = batch_label.astype('int64')
- yield batch_image, batch_label
- return generator
-
- image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
- reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
-
- user_defined_generator = random_image_and_label_generator(784, 784)
- reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
-
- loss = network(image, label)
- executor = fluid.Executor(fluid.CPUPlace())
- executor.run(fluid.default_startup_program())
-
- for _ in range(EPOCH_NUM):
- for data in reader():
- executor.run(feed=data, fetch_list=[loss])
-
-
-.. py:method:: next()
-
-获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。
+.. _cn_api_fluid_io_PyReader:
+
+PyReader
+-------------------------------
+
+.. py:class:: paddle.fluid.io.PyReader(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False)
+
+
+
+
+
+在python中为数据输入创建一个reader对象。将使用python线程预取数据,并将其异步插入队列。当调用Executor.run(…)时,将自动提取队列中的数据。
+
+参数:
+ - **feed_list** (list(Variable)|tuple(Variable)) - feed变量列表,由 ``fluid.layers.data()`` 创建。
+ - **capacity** (int) - PyReader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。
+ - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader`` 。若use_double_buffer=True,PyReader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。
+ - **iterable** (bool) - 所创建的PyReader对象是否可迭代。
+ - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。
+
+
+返回: 被创建的reader对象
+
+返回类型: reader (Reader)
+
+
+**代码示例**
+
+1.如果iterable=False,则创建的PyReader对象几乎与 ``fluid.layers.py_reader()`` 相同。算子将被插入program中。用户应该在每个epoch之前调用 ``start()`` ,并在epoch结束时捕获 ``Executor.run()`` 抛出的 ``fluid.core.EOFException`` 。一旦捕获到异常,用户应该调用 ``reset()`` 手动重置reader。
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 5
+ BATCH_SIZE = 3
+
+ def network(image, label):
+ # 用户定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=image, size=10, act='softmax')
+ return fluid.layers.cross_entropy(input=predict, label=label)
+
+ def reader_creator_random_image_and_label(height, width):
+ def reader():
+ for i in range(ITER_NUM):
+ fake_image = np.random.uniform(low=0,
+ high=255,
+ size=[height, width])
+ fake_label = np.ones([1])
+ yield fake_image, fake_label
+ return reader
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+
+ reader = fluid.io.PyReader(feed_list=[image, label],
+ capacity=4,
+ iterable=False)
+
+ user_defined_reader = reader_creator_random_image_and_label(784, 784)
+ reader.decorate_sample_list_generator(
+ paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
+
+ loss = network(image, label)
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+ for i in range(EPOCH_NUM):
+ reader.start()
+ while True:
+ try:
+ executor.run(feed=None)
+ except fluid.core.EOFException:
+ reader.reset()
+ break
+
+
+2.如果iterable=True,则创建的PyReader对象与程序分离。程序中不会插入任何算子。在本例中,创建的reader是一个python生成器,它是可迭代的。用户应将从PyReader对象生成的数据输入 ``Executor.run(feed=...)`` 。
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 5
+ BATCH_SIZE = 10
+
+ def network(image, label):
+ # 用户定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=image, size=10, act='softmax')
+ return fluid.layers.cross_entropy(input=predict, label=label)
+
+ def reader_creator_random_image(height, width):
+ def reader():
+ for i in range(ITER_NUM):
+ fake_image = np.random.uniform(low=0, high=255, size=[height, width])
+ fake_label = np.ones([1])
+ yield fake_image, fake_label
+ return reader
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
+
+ user_defined_reader = reader_creator_random_image(784, 784)
+ reader.decorate_sample_list_generator(
+ paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
+ fluid.core.CPUPlace())
+ loss = network(image, label)
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+
+ for _ in range(EPOCH_NUM):
+ for data in reader():
+ executor.run(feed=data, fetch_list=[loss])
+
+3. return_list=True,返回值将用list表示而非dict,通常用于动态图模式中。
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 5
+ BATCH_SIZE = 10
+
+ def reader_creator_random_image(height, width):
+ def reader():
+ for i in range(ITER_NUM):
+ yield np.random.uniform(low=0, high=255, size=[height, width]), \
+ np.random.random_integers(low=0, high=9, size=[1])
+ return reader
+
+ place = fluid.CPUPlace()
+ with fluid.dygraph.guard(place):
+ py_reader = fluid.io.PyReader(capacity=2, return_list=True)
+ user_defined_reader = reader_creator_random_image(784, 784)
+ py_reader.decorate_sample_list_generator(
+ paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
+ place)
+ for image, label in py_reader():
+ relu = fluid.layers.relu(image)
+
+.. py:method:: start()
+
+启动数据输入线程。只能在reader对象不可迭代时调用。
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ BATCH_SIZE = 10
+
+ def generator():
+ for i in range(5):
+ yield np.random.uniform(low=0, high=255, size=[784, 784]),
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
+ reader.decorate_sample_list_generator(
+ paddle.batch(generator, batch_size=BATCH_SIZE))
+
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+ for i in range(3):
+ reader.start()
+ while True:
+ try:
+ executor.run(feed=None)
+ except fluid.core.EOFException:
+ reader.reset()
+ break
+
+.. py:method:: reset()
+
+当 ``fluid.core.EOFException`` 抛出时重置reader对象。只能在reader对象不可迭代时调用。
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ BATCH_SIZE = 10
+
+ def generator():
+ for i in range(5):
+ yield np.random.uniform(low=0, high=255, size=[784, 784]),
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
+ reader.decorate_sample_list_generator(
+ paddle.batch(generator, batch_size=BATCH_SIZE))
+
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+ for i in range(3):
+ reader.start()
+ while True:
+ try:
+ executor.run(feed=None)
+ except fluid.core.EOFException:
+ reader.reset()
+ break
+
+.. py:method:: decorate_sample_generator(sample_generator, batch_size, drop_last=True, places=None)
+
+设置PyReader对象的数据源。
+
+提供的 ``sample_generator`` 应该是一个python生成器,它生成的数据类型应为list(numpy.ndarray)。
+
+当PyReader对象可迭代时,必须设置 ``places`` 。
+
+如果所有的输入都没有LOD,这个方法比 ``decorate_sample_list_generator(paddle.batch(sample_generator, ...))`` 更快。
+
+参数:
+ - **sample_generator** (generator) – Python生成器,yield 类型为list(numpy.ndarray)
+ - **batch_size** (int) – batch size,必须大于0
+ - **drop_last** (bool) – 当最后一个batch的样本数小于batch_size时,是否丢弃该不完整的batch
+ - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 15
+ BATCH_SIZE = 3
+
+ def network(image, label):
+ # 用户定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=image, size=10, act='softmax')
+ return fluid.layers.cross_entropy(input=predict, label=label)
+
+ def random_image_and_label_generator(height, width):
+ def generator():
+ for i in range(ITER_NUM):
+ fake_image = np.random.uniform(low=0,
+ high=255,
+ size=[height, width])
+ fake_label = np.array([1])
+ yield fake_image, fake_label
+ return generator
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+ user_defined_generator = random_image_and_label_generator(784, 784)
+ reader.decorate_sample_generator(user_defined_generator,
+ batch_size=BATCH_SIZE,
+ places=[fluid.CPUPlace()])
+ loss = network(image, label)
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+
+ for _ in range(EPOCH_NUM):
+ for data in reader():
+ executor.run(feed=data, fetch_list=[loss])
+
+.. py:method:: decorate_sample_list_generator(reader, places=None)
+
+设置PyReader对象的数据源。
+
+提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型的批处理数据。
+
+当PyReader对象可迭代时,必须设置 ``places`` 。
+
+参数:
+ - **reader** (generator) – 返回列表(numpy.ndarray)类型的批处理数据的Python生成器
+ - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 15
+ BATCH_SIZE = 3
+
+ def network(image, label):
+ # 用户定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=image, size=10, act='softmax')
+ return fluid.layers.cross_entropy(input=predict, label=label)
+
+ def random_image_and_label_generator(height, width):
+ def generator():
+ for i in range(ITER_NUM):
+ fake_image = np.random.uniform(low=0,
+ high=255,
+ size=[height, width])
+ fake_label = np.ones([1])
+ yield fake_image, fake_label
+ return generator
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+ user_defined_generator = random_image_and_label_generator(784, 784)
+ reader.decorate_sample_list_generator(
+ paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
+ fluid.core.CPUPlace())
+ loss = network(image, label)
+ executor = fluid.Executor(fluid.core.CPUPlace())
+ executor.run(fluid.default_startup_program())
+
+ for _ in range(EPOCH_NUM):
+ for data in reader():
+ executor.run(feed=data, fetch_list=[loss])
+
+.. py:method:: decorate_batch_generator(reader, places=None)
+
+设置PyReader对象的数据源。
+
+提供的 ``reader`` 应该是一个python生成器,它生成列表(numpy.ndarray)类型或LoDTensor类型的批处理数据。
+
+当PyReader对象可迭代时,必须设置 ``places`` 。
+
+参数:
+ - **reader** (generator) – 返回LoDTensor类型的批处理数据的Python生成器
+ - **places** (None|list(CUDAPlace)|list(CPUPlace)) – 位置列表。当PyReader可迭代时必须被提供
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ EPOCH_NUM = 3
+ ITER_NUM = 15
+ BATCH_SIZE = 3
+
+ def network(image, label):
+ # 用户定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=image, size=10, act='softmax')
+ return fluid.layers.cross_entropy(input=predict, label=label)
+
+ def random_image_and_label_generator(height, width):
+ def generator():
+ for i in range(ITER_NUM):
+ batch_image = np.random.uniform(low=0,
+ high=255,
+ size=[BATCH_SIZE, height, width])
+ batch_label = np.ones([BATCH_SIZE, 1])
+ batch_image = batch_image.astype('float32')
+ batch_label = batch_label.astype('int64')
+ yield batch_image, batch_label
+ return generator
+
+ image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+ reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+ user_defined_generator = random_image_and_label_generator(784, 784)
+ reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
+
+ loss = network(image, label)
+ executor = fluid.Executor(fluid.CPUPlace())
+ executor.run(fluid.default_startup_program())
+
+ for _ in range(EPOCH_NUM):
+ for data in reader():
+ executor.run(feed=data, fetch_list=[loss])
+
+
+.. py:method:: next()
+
+获取下一个数据。用户不应直接调用此方法。此方法用于PaddlePaddle框架内部实现Python 2.x的迭代器协议。
diff --git a/doc/fluid/api_cn/io_cn/batch_cn.rst b/doc/fluid/api_cn/io_cn/batch_cn.rst
index d93e4b1d5166a7bb1f73f31f8c9a82c36ae68df1..3e3804abfb573be5a561fa7fb48603a15bf5f87c 100644
--- a/doc/fluid/api_cn/io_cn/batch_cn.rst
+++ b/doc/fluid/api_cn/io_cn/batch_cn.rst
@@ -5,6 +5,9 @@ batch
.. py:function:: paddle.fluid.io.batch(reader, batch_size, drop_last=False)
+
+
+
该接口是一个reader的装饰器。返回的reader将输入reader的数据打包成指定的batch_size大小的批处理数据(batched data)。
参数:
diff --git a/doc/fluid/api_cn/io_cn/buffered_cn.rst b/doc/fluid/api_cn/io_cn/buffered_cn.rst
index 26f7a2c243134217209cd1518a7b5c45e970a506..027c6346f735874d8c9accc38024c09b5940c429 100644
--- a/doc/fluid/api_cn/io_cn/buffered_cn.rst
+++ b/doc/fluid/api_cn/io_cn/buffered_cn.rst
@@ -5,6 +5,9 @@ buffered
.. py:function:: paddle.fluid.io.buffered(reader, size)
+
+
+
创建一个缓存数据读取器,它读取数据并且存储进缓存区,从缓存区读取数据将会加速,只要缓存不是空的。
参数:
diff --git a/doc/fluid/api_cn/io_cn/cache_cn.rst b/doc/fluid/api_cn/io_cn/cache_cn.rst
index aa37f22044aef34a35359d36b357e538a078ab70..e93e4c85d134c0feb2ff813a6127c89816baed76 100644
--- a/doc/fluid/api_cn/io_cn/cache_cn.rst
+++ b/doc/fluid/api_cn/io_cn/cache_cn.rst
@@ -5,6 +5,9 @@ cache
.. py:function:: paddle.fluid.io.cache(reader)
+
+
+
缓存reader数据到内存中,小心此方法可能会花长时间来处理数据,并且会占用大量内存。 ``reader()`` 只能被调用一次。
参数:
diff --git a/doc/fluid/api_cn/io_cn/chain_cn.rst b/doc/fluid/api_cn/io_cn/chain_cn.rst
index 9b90d3d1c4b27b23982eadfcf8694392961e8252..4a4872d268cc2dfbddad0a6d4720be54e5eb41c9 100644
--- a/doc/fluid/api_cn/io_cn/chain_cn.rst
+++ b/doc/fluid/api_cn/io_cn/chain_cn.rst
@@ -5,6 +5,9 @@ chain
.. py:function:: paddle.fluid.io.chain(*readers)
+
+
+
该接口将多个数据读取器组成一个数据读取器,它依次返回多个数据读取器的输出数据,同时不改变输出数据原先的格式。
举例来说,如果有3个输入读取器且输出分别为[0,0,0]、[10,10,10]和[20,20,20],那么调用该接口产生的新数据读取器的输出为[0,0,0], [10,10,10], [20,20,20]。
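+
+下面给出一段最小示意代码(补充示意,reader 的构造仅为演示,非原文示例),复现上述"3个输入读取器"的例子:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    def make_reader(v):
+        def reader():
+            yield [v, v, v]
+        return reader
+
+    # 依次串联三个 reader,输出顺序与传入顺序一致
+    chained = fluid.io.chain(make_reader(0), make_reader(10), make_reader(20))
+    for item in chained():
+        print(item)   # 依次输出 [0, 0, 0]、[10, 10, 10]、[20, 20, 20]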
diff --git a/doc/fluid/api_cn/io_cn/compose_cn.rst b/doc/fluid/api_cn/io_cn/compose_cn.rst
index 0153ceadc2fc6f158539ab9e5069dd4079d2a118..b4393bab5db375413712c27b60fb0699e4d85370 100644
--- a/doc/fluid/api_cn/io_cn/compose_cn.rst
+++ b/doc/fluid/api_cn/io_cn/compose_cn.rst
@@ -5,6 +5,9 @@ compose
.. py:function:: paddle.fluid.io.compose(*readers, **kwargs)
+
+
+
该接口将多个数据读取器组合为一个数据读取器,返回读取器的输出包含所有输入读取器的输出。
例如:如果输入为三个reader,三个reader的输出分别为:(1,2)、3、(4,5),则组合reader的输出为:(1,2,3,4,5)。
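+
+下面给出一段最小示意代码(补充示意,reader 的构造仅为演示,非原文示例),复现上述"(1,2)、3、(4,5)"的例子:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    def reader_a():
+        yield (1, 2)
+
+    def reader_b():
+        yield 3
+
+    def reader_c():
+        yield (4, 5)
+
+    # 组合 reader 的输出为各输入 reader 输出按顺序拼接后的元组
+    composed = fluid.io.compose(reader_a, reader_b, reader_c)
+    for item in composed():
+        print(item)   # 输出 (1, 2, 3, 4, 5)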
diff --git a/doc/fluid/api_cn/io_cn/firstn_cn.rst b/doc/fluid/api_cn/io_cn/firstn_cn.rst
index 0123e25a19af6acfb729fb1f7eab565cd76d8b64..ba9c1d427ab3ec1946dab0e78f1a2021a712fe94 100644
--- a/doc/fluid/api_cn/io_cn/firstn_cn.rst
+++ b/doc/fluid/api_cn/io_cn/firstn_cn.rst
@@ -5,6 +5,9 @@ firstn
.. py:function:: paddle.fluid.io.firstn(reader, n)
+
+
+
该接口创建一个数据读取器,它可以返回的最大样本数为n。
参数:
diff --git a/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst b/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..43490ac9dff81daad4195593af4a2b4c5e443004
--- /dev/null
+++ b/doc/fluid/api_cn/io_cn/get_program_parameter_cn.rst
@@ -0,0 +1,30 @@
+.. _cn_api_fluid_io_get_program_parameter:
+
+get_program_parameter
+-------------------------------
+
+.. py:function:: paddle.fluid.io.get_program_parameter(program)
+
+:api_attr: 声明式编程模式(静态图)
+
+
+
+该接口从Program中获取所有参数。
+
+参数:
+ - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取参数。
+
+返回: 包含此Program中所有参数的list
+
+返回类型: list
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ data = fluid.data(name="img", shape=[64, 784])
+ w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+ b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+ list_para = fluid.io.get_program_parameter( fluid.default_main_program() )
+
diff --git a/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst b/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b7eb886d371c97f09fa8b4e25ba731313f3f9108
--- /dev/null
+++ b/doc/fluid/api_cn/io_cn/get_program_persistable_vars_cn.rst
@@ -0,0 +1,30 @@
+.. _cn_api_fluid_io_get_program_persistable_vars:
+
+get_program_persistable_vars
+-------------------------------
+
+.. py:function:: paddle.fluid.io.get_program_persistable_vars(program)
+
+:api_attr: 声明式编程模式(静态图)
+
+
+
+该接口从Program中获取所有persistable的变量。
+
+参数:
+ - **program** ( :ref:`cn_api_fluid_Program` ) – 从该Program中获取persistable的变量。
+
+返回: 包含此Program中所有persistable的变量
+
+返回类型: list
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ data = fluid.data(name="img", shape=[64, 784])
+ w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+ b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+ list_para = fluid.io.get_program_persistable_vars( fluid.default_main_program() )
+
diff --git a/doc/fluid/api_cn/io_cn/load_cn.rst b/doc/fluid/api_cn/io_cn/load_cn.rst
index d702dff7812ecf5a80de86eedb6c6269f3cf83bf..863dd4ea083bd3d663e4e3a4b06eaed787086b2d 100644
--- a/doc/fluid/api_cn/io_cn/load_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_cn.rst
@@ -5,4 +5,11 @@ load
.. py:function:: paddle.fluid.io.load(program, model_path, executor=None, var_list=None)
-``fluid.io.load`` 是 ``fluid.load`` 的别名
+:api_attr: 声明式编程模式(静态图)
+:alias_main: paddle.load
+:alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load
+:old_api: paddle.fluid.io.load
+
+
+
+``fluid.io.load`` 是 :ref:`cn_api_fluid_load` 的别名
diff --git a/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst b/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst
index 6e053d3b80d9b12ab8e42e6df81f851fff326a5d..e53d9661a7bac90805a6e880a2c3046b68caa503 100644
--- a/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_inference_model_cn.rst
@@ -3,10 +3,13 @@
load_inference_model
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.load_inference_model(dirname, executor, model_filename=None, params_filename=None, pserver_endpoints=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
从指定文件路径中加载预测模型(Inference Model),即调用该接口可获得模型结构(Inference Program)和模型参数。若只想加载预训练后的模型参数,请使用 :ref:`cn_api_fluid_io_load_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。
参数:
diff --git a/doc/fluid/api_cn/io_cn/load_params_cn.rst b/doc/fluid/api_cn/io_cn/load_params_cn.rst
index 4c178a482a72b8e6b9907fc8fb87a4ca5a977672..53a5bccc6f2024abf67a06320249a6652266077d 100644
--- a/doc/fluid/api_cn/io_cn/load_params_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_params_cn.rst
@@ -3,10 +3,13 @@
load_params
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.load_params(executor, dirname, main_program=None, filename=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口从指定的 ``main_program`` 中筛选出所有模型参数变量,并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些模型参数进行赋值。
使用 ``dirname`` 指定模型参数的存储路径。若模型参数变量以分离文件的形式存储在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有模型参数存储在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。
diff --git a/doc/fluid/api_cn/io_cn/load_persistables_cn.rst b/doc/fluid/api_cn/io_cn/load_persistables_cn.rst
index 2d9def84ed16d6ce3940dab624cfb59450224f5e..24d3eac7270cee0b3b1e61be1c21ac671099e1b3 100644
--- a/doc/fluid/api_cn/io_cn/load_persistables_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_persistables_cn.rst
@@ -3,10 +3,13 @@
load_persistables
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.load_persistables(executor, dirname, main_program=None, filename=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口从给定的 ``main_program`` 中取出所有 ``persistable==True`` 的变量(即持久性变量,详见 :ref:`api_guide_model_save_reader` ),并根据目录 ``dirname`` 或 ``filename`` 提供的参数文件对这些持久性变量进行赋值。
使用 ``dirname`` 指定持久性变量的存储路径。若持久性变量以分离文件的形式保存在 ``dirname`` 指定的目录下,则设置 ``filename`` 值为None;若所有持久性变量保存在一个单独的二进制文件中,则使用 ``filename`` 来指明这个二进制文件。
diff --git a/doc/fluid/api_cn/io_cn/load_program_state_cn.rst b/doc/fluid/api_cn/io_cn/load_program_state_cn.rst
index bee50c0770b91590161de01b255d18a0ea3915c0..60b457b35043c25545522a808817f5f72899aa5b 100644
--- a/doc/fluid/api_cn/io_cn/load_program_state_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_program_state_cn.rst
@@ -5,6 +5,10 @@ load_program_state
.. py:function:: paddle.fluid.io.load_program_state(model_path, var_list=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口从本地加载 ``Program`` 的参数和优化器的变量信息到内存中。
参数:
diff --git a/doc/fluid/api_cn/io_cn/load_vars_cn.rst b/doc/fluid/api_cn/io_cn/load_vars_cn.rst
index 99b24eb1dc2f7635a174dd31734ebcb70d9171be..4126ae5eccefb3c713d20607031dd1ab2642f6fa 100644
--- a/doc/fluid/api_cn/io_cn/load_vars_cn.rst
+++ b/doc/fluid/api_cn/io_cn/load_vars_cn.rst
@@ -1,65 +1,69 @@
-.. _cn_api_fluid_io_load_vars:
-
-load_vars
--------------------------------
-
-.. py:function:: paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None)
-
-该接口从文件中加载 ``Program`` 的变量。
-
-通过 ``vars`` 指定需要加载的变量,或者通过 ``predicate`` 筛选需要加载的变量, ``vars`` 和 ``predicate`` 不能同时为None。
-
-参数:
- - **executor** (Executor) – 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。
- - **dirname** (str) – 加载变量所在的目录路径。
- - **main_program** (Program,可选) – 需要加载变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。
- - **vars** (list[Variable],可选) – 通过该列表指定需要加载的变量。默认值为None。
- - **predicate** (function,可选) – 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行加载。如果通过 ``vars`` 指定了需要加载的变量,则该参数无效。默认值为None。
- - **filename** (str,可选) – 加载所有变量的文件。如果所有待加载变量是保存在一个文件中,则设置 ``filename`` 为该文件名;如果所有待加载变量是按照变量名称单独保存成文件,则设置 ``filename`` 为None。默认值为None。
-
-返回: 无
-
-抛出异常:
- - ``TypeError`` - 如果main_program不是Program的实例,也不是None。
-
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- main_prog = fluid.Program()
- startup_prog = fluid.Program()
- with fluid.program_guard(main_prog, startup_prog):
- data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
- w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
- b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
- hidden_w = fluid.layers.matmul(x=data, y=w)
- hidden_b = fluid.layers.elementwise_add(hidden_w, b)
- place = fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(startup_prog)
-
- # 示例一:用vars来指定加载变量。
- path = "./my_paddle_vars"
- var_list = [w, b]
- fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
- filename="vars_file")
- fluid.io.load_vars(executor=exe, dirname=path, vars=var_list,
- filename="vars_file")
- # 加载w和b。它们被保存在'var_file'的文件中,所在路径为 "./my_paddle_model" 。
-
- # 示例二:通过predicate来筛选加载变量。
- def name_has_fc(var):
- res = "fc" in var.name
- return res
-
- param_path = "./my_paddle_model"
- fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc)
- fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc)
- #加载 `main_program` 中变量名包含 ‘fc’ 的所有变量
- #此前所有变量应该保存在不同文件中
-
-
-
-
+.. _cn_api_fluid_io_load_vars:
+
+load_vars
+-------------------------------
+
+.. py:function:: paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None)
+
+:api_attr: 声明式编程模式(静态图)
+
+
+
+该接口从文件中加载 ``Program`` 的变量。
+
+通过 ``vars`` 指定需要加载的变量,或者通过 ``predicate`` 筛选需要加载的变量, ``vars`` 和 ``predicate`` 不能同时为None。
+
+参数:
+ - **executor** (Executor) – 运行的执行器,执行器的介绍请参考 :ref:`api_guide_model_save_reader` 。
+ - **dirname** (str) – 加载变量所在的目录路径。
+ - **main_program** (Program,可选) – 需要加载变量的 ``Program`` , ``Program`` 的介绍请参考 :ref:`api_guide_Program` 。如果 ``main_program`` 为None,则使用默认的主程序。默认值为None。
+ - **vars** (list[Variable],可选) – 通过该列表指定需要加载的变量。默认值为None。
+ - **predicate** (function,可选) – 通过该函数筛选 :math:`predicate(variable)== True` 的变量进行加载。如果通过 ``vars`` 指定了需要加载的变量,则该参数无效。默认值为None。
+ - **filename** (str,可选) – 加载所有变量的文件。如果所有待加载变量是保存在一个文件中,则设置 ``filename`` 为该文件名;如果所有待加载变量是按照变量名称单独保存成文件,则设置 ``filename`` 为None。默认值为None。
+
+返回: 无
+
+抛出异常:
+ - ``TypeError`` - 如果main_program不是Program的实例,也不是None。
+
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ main_prog = fluid.Program()
+ startup_prog = fluid.Program()
+ with fluid.program_guard(main_prog, startup_prog):
+ data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+ w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+ b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+ hidden_w = fluid.layers.matmul(x=data, y=w)
+ hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+ place = fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ exe.run(startup_prog)
+
+ # 示例一:用vars来指定加载变量。
+ path = "./my_paddle_vars"
+ var_list = [w, b]
+ fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
+ filename="vars_file")
+ fluid.io.load_vars(executor=exe, dirname=path, vars=var_list,
+ filename="vars_file")
+ # 加载w和b,它们此前被保存在 "./my_paddle_vars" 目录下名为 "vars_file" 的文件中。
+
+ # 示例二:通过predicate来筛选加载变量。
+ def name_has_fc(var):
+ res = "fc" in var.name
+ return res
+
+ param_path = "./my_paddle_model"
+ fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc)
+ fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog, vars=None, predicate=name_has_fc)
+ # 加载 `main_program` 中变量名包含 ‘fc’ 的所有变量
+ # 此前这些变量被分别保存在不同的文件中
+
+
+
+
diff --git a/doc/fluid/api_cn/io_cn/map_readers_cn.rst b/doc/fluid/api_cn/io_cn/map_readers_cn.rst
index 55e9b7bae25ef1e2ac72a29da77a62b31a4daca3..cb50e62839cd523184fef37d4ff9dc625c52fc6b 100644
--- a/doc/fluid/api_cn/io_cn/map_readers_cn.rst
+++ b/doc/fluid/api_cn/io_cn/map_readers_cn.rst
@@ -5,6 +5,9 @@ map_readers
.. py:function:: paddle.fluid.io.map_readers(func, *readers)
+
+
+
该接口将创建一个数据读取器(Reader),其中 `func` 函数的输出将直接作为新数据读取器的输出, `readers` 的输出将作为函数 `func` 的输入参数。
例如:如果输入的 `readers` 为两个输出分别为:2、3 的 `reader` ,输入的 `func` 为乘法函数 `mul(x, y)` ,则得到的新建 `reader` 的输出为:6。
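与上述说明对应的一个简要示例(示意性质,两个 reader 与 mul 函数均为此处假设的定义):

.. code-block:: python

    import paddle.fluid as fluid

    def reader_a():
        yield 2

    def reader_b():
        yield 3

    def mul(x, y):
        return x * y

    # 新 reader 的每条输出为 mul(a, b),其中 a、b 分别取自两个原始 reader
    new_reader = fluid.io.map_readers(mul, reader_a, reader_b)
    for data in new_reader():
        print(data)  # 6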
diff --git a/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst b/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst
index 4c4b697c55a0f6710ac56ddf56b022f3633f8cc8..ab600239eb702ffa2c503f88d5bd9d6ef6bda443 100644
--- a/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst
+++ b/doc/fluid/api_cn/io_cn/multiprocess_reader_cn.rst
@@ -5,6 +5,9 @@ multiprocess_reader
.. py:function:: paddle.fluid.io.multiprocess_reader(readers, use_pipe=True, queue_size=1000)
+
+
+
使用python多进程从 ``readers`` 中读取数据,然后使用 ``multiprocessing.Pipe`` 或 ``multiprocessing.Queue`` 合并所有数据。 ``readers`` 列表中的每个reader会被创建一个独立的进程来调用,reader之间应该相互独立,互不影响,避免出现多进程读取的冲突问题。
``multiprocessing.Queue`` 需要 /dev/shm 的rw访问权限,某些平台不支持。
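下面是一个简要示例(示意性质,reader 的定义为此处假设):

.. code-block:: python

    import paddle.fluid as fluid

    def fake_reader(start, end):
        def __impl__():
            for i in range(start, end):
                yield [i]
        return __impl__

    # 两个 reader 分别在独立进程中运行,读取结果经由管道合并
    reader = fluid.io.multiprocess_reader(
        [fake_reader(0, 5), fake_reader(5, 10)], use_pipe=True)

    for data in reader():
        print(data)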
diff --git a/doc/fluid/api_cn/io_cn/save_cn.rst b/doc/fluid/api_cn/io_cn/save_cn.rst
index 638c911d9c74a4395b21446ae3dcc282f254857a..f5095aecb12322cda0284c9337bcf27c30dd0194 100644
--- a/doc/fluid/api_cn/io_cn/save_cn.rst
+++ b/doc/fluid/api_cn/io_cn/save_cn.rst
@@ -3,43 +3,11 @@
save
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.save(program, model_path)
-该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。
-
-参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。
-
-优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。
-所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。
-
-网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。
-
-参数:
- - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。
- - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。
-
-返回: 无
-
-**代码示例**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
-
- x = fluid.data(name="x", shape=[10, 10], dtype='float32')
- y = fluid.layers.fc(x, 10)
- z = fluid.layers.fc(y, 10)
- place = fluid.CPUPlace()
- exe = fluid.Executor(place)
- exe.run(fluid.default_startup_program())
-
- fluid.save(fluid.default_main_program(), "./test_path")
-
-
-
-
+:api_attr: 声明式编程模式(静态图)
+``fluid.io.save`` 是 :ref:`cn_api_fluid_save` 的别名
diff --git a/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst b/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst
index 966a05a79c93660f0999dcc5cda2b53569b3c512..e085e41395819349bbd3b4e87aed7f351a009cfe 100644
--- a/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst
+++ b/doc/fluid/api_cn/io_cn/save_inference_model_cn.rst
@@ -3,10 +3,13 @@
save_inference_model
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, model_filename=None, params_filename=None, export_for_deployment=True, program_only=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
修剪指定的 ``main_program`` 以构建一个专门用于预测的 ``Inference Program`` ( ``Program`` 含义详见 :ref:`api_guide_Program` )。 所得到的 ``Inference Program`` 及其对应的所有相关参数均被保存到 ``dirname`` 指定的目录中。若只想保存训练后的模型参数,请使用 :ref:`cn_api_fluid_io_save_params` 接口。更多细节请参考 :ref:`api_guide_model_save_reader` 。
**注意:dirname用于指定保存预测模型结构和参数的目录。若需要将模型参数保存在指定目录的若干文件中,请设置params_filename的值为None; 若需要将所有模型参数保存在一个单独的二进制文件中,请使用params_filename来指定该二进制文件的名称。**
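下面给出一个简要示例(示意性质,网络结构、目录与变量名均为此处假设),将裁剪得到的预测 ``Program`` 及参数保存到指定目录:

.. code-block:: python

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        image = fluid.data(name="img", shape=[None, 784], dtype="float32")
        predict = fluid.layers.fc(image, size=10, act="softmax")

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)

    # params_filename=None(默认)表示参数分别保存为目录下的多个文件
    fluid.io.save_inference_model(dirname="./infer_model",
                                  feeded_var_names=["img"],
                                  target_vars=[predict],
                                  executor=exe,
                                  main_program=main_prog)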
diff --git a/doc/fluid/api_cn/io_cn/save_params_cn.rst b/doc/fluid/api_cn/io_cn/save_params_cn.rst
index 8353acdda6ba9c1cb6c67c5ff0479e93519b40bd..879c16ed0d192fbeaa9e803f07b245bd6d5ec076 100644
--- a/doc/fluid/api_cn/io_cn/save_params_cn.rst
+++ b/doc/fluid/api_cn/io_cn/save_params_cn.rst
@@ -3,10 +3,13 @@
save_params
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.save_params(executor, dirname, main_program=None, filename=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP从 ``main_program`` 中取出所有参数,然后将它们保存到 ``dirname`` 目录下或名为 ``filename`` 的文件中。
``dirname`` 用于指定保存参数的目标路径。若想将参数保存到多个独立文件中,设置 ``filename=None`` ; 若想将所有参数保存在单个文件中,请设置 ``filename`` 来指定该文件的名称。
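一个简要示例(示意性质,网络结构与文件名均为此处假设),演示通过 ``filename`` 将所有参数保存到单个文件:

.. code-block:: python

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        image = fluid.data(name="img", shape=[None, 784], dtype="float32")
        fc = fluid.layers.fc(image, size=10)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)

    # 指定 filename 后,所有参数被保存到 ./my_paddle_model/params_file 这一个文件中
    fluid.io.save_params(executor=exe, dirname="./my_paddle_model",
                         main_program=main_prog, filename="params_file")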
diff --git a/doc/fluid/api_cn/io_cn/save_persistables_cn.rst b/doc/fluid/api_cn/io_cn/save_persistables_cn.rst
index b832914d9a8914988c7f60c4839de65fc9ab9f84..9de51518e0caabcaf2a42a258116039753e5bac2 100644
--- a/doc/fluid/api_cn/io_cn/save_persistables_cn.rst
+++ b/doc/fluid/api_cn/io_cn/save_persistables_cn.rst
@@ -3,10 +3,13 @@
save_persistables
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.save_persistables(executor, dirname, main_program=None, filename=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP从给定 ``main_program`` 中取出所有持久性变量(详见 :ref:`api_guide_model_save_reader` ),然后将它们保存到目录 ``dirname`` 中或 ``filename`` 指定的文件中。
``dirname`` 用于指定保存持久性变量的目录。如果想将持久性变量保存到指定目录的若干文件中,请设置 ``filename=None`` ; 若想将所有持久性变量保存在同一个文件中,请设置 ``filename`` 来指定文件的名称。
diff --git a/doc/fluid/api_cn/io_cn/save_vars_cn.rst b/doc/fluid/api_cn/io_cn/save_vars_cn.rst
index 325c21628a89df83ef222a34afcf65f75736b815..7530540754accd0f4658ba885721fab22493d738 100644
--- a/doc/fluid/api_cn/io_cn/save_vars_cn.rst
+++ b/doc/fluid/api_cn/io_cn/save_vars_cn.rst
@@ -3,10 +3,13 @@
save_vars
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.io.save_vars(executor, dirname, main_program=None, vars=None, predicate=None, filename=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口将 ``Program`` 的变量保存到文件中。
通过 ``vars`` 指定需要保存的变量,或者通过 ``predicate`` 筛选需要保存的变量, ``vars`` 和 ``predicate`` 不能同时为None。
diff --git a/doc/fluid/api_cn/io_cn/set_program_state_cn.rst b/doc/fluid/api_cn/io_cn/set_program_state_cn.rst
index 668c7fb5f743340861e1c7c8888548a003b46140..9af0478f0c88d1bc469c094725b13a33a7f28763 100644
--- a/doc/fluid/api_cn/io_cn/set_program_state_cn.rst
+++ b/doc/fluid/api_cn/io_cn/set_program_state_cn.rst
@@ -5,6 +5,10 @@ set_program_state
.. py:function:: paddle.fluid.io.set_program_state(program, state_dict)
+:api_attr: 声明式编程模式(静态图)
+
+
+
利用 ``state_dict`` 设置 ``Program`` 的参数和优化器信息。
如果参数的 shape 或 dtype 不匹配,则会引发异常。
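下面是一个简要示例(示意性质,网络结构与保存路径均为此处假设),配合 ``load_program_state`` 使用:

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[None, 10], dtype="float32")
    y = fluid.layers.fc(x, size=10)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    prog = fluid.default_main_program()
    # 先保存,再加载得到 state_dict,最后用其设置 prog 中的参数
    fluid.save(prog, "./temp_model")
    state_dict = fluid.io.load_program_state("./temp_model")
    fluid.io.set_program_state(prog, state_dict)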
diff --git a/doc/fluid/api_cn/io_cn/shuffle_cn.rst b/doc/fluid/api_cn/io_cn/shuffle_cn.rst
index a986fc7b3d218b39c0062cd3390c269adae292a1..7b64c23b7e5706e23a9051f60b3a55bea7ea6573 100644
--- a/doc/fluid/api_cn/io_cn/shuffle_cn.rst
+++ b/doc/fluid/api_cn/io_cn/shuffle_cn.rst
@@ -5,6 +5,12 @@ shuffle
.. py:function:: paddle.fluid.io.shuffle(reader, buffer_size)
+:alias_main: paddle.shuffle
+:alias: paddle.shuffle,paddle.tensor.shuffle,paddle.tensor.random.shuffle
+:old_api: paddle.fluid.io.shuffle
+
+
+
该接口创建一个数据读取器,其功能是将原始数据读取器的数据打乱,然后返回无序的数据。
从原始数据读取器取出buf_size个数据到缓冲区,将缓冲区数据打乱,然后将无序的数据依次返回。当缓冲区数据全部输出后,再次执行上述步骤。
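以下是一个简要示例(示意性质,原始 reader 为此处假设的定义):

.. code-block:: python

    import paddle.fluid as fluid

    def raw_reader():
        for i in range(10):
            yield i

    # 每次从大小为 5 的缓冲区中打乱后输出
    shuffled_reader = fluid.io.shuffle(raw_reader, buffer_size=5)
    for data in shuffled_reader():
        print(data)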
diff --git a/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst b/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst
index 7177d665242ce8e5a5137d83c60ec773eb4e9f05..5f434ecb9525c4f8dd746319d2b4baace88c89ae 100644
--- a/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst
+++ b/doc/fluid/api_cn/io_cn/xmap_readers_cn.rst
@@ -5,6 +5,9 @@ xmap_readers
.. py:function:: paddle.fluid.io.xmap_readers(mapper, reader, process_num, buffer_size, order=False)
+
+
+
多线程下,使用自定义的映射器 mapper 将 reader 返回的样本映射后放入输出队列。
参数:
diff --git a/doc/fluid/api_cn/layers_cn.rst b/doc/fluid/api_cn/layers_cn.rst
index e4b4a374f57f8dc05fd2781c74d21399612859f0..8990362cf904ee1e252ed0a04a0dfaedd5707350 100644
--- a/doc/fluid/api_cn/layers_cn.rst
+++ b/doc/fluid/api_cn/layers_cn.rst
@@ -31,6 +31,7 @@ fluid.layers
layers_cn/auc_cn.rst
layers_cn/autoincreased_step_counter_cn.rst
layers_cn/batch_norm_cn.rst
+ layers_cn/BasicDecoder_cn.rst
layers_cn/beam_search_cn.rst
layers_cn/beam_search_decode_cn.rst
layers_cn/bilinear_tensor_product_cn.rst
@@ -87,6 +88,7 @@ fluid.layers
layers_cn/dynamic_lstmp_cn.rst
layers_cn/dynamic_decode_cn.rst
layers_cn/Decoder_cn.rst
+ layers_cn/DecodeHelper_cn.rst
layers_cn/DynamicRNN_cn.rst
layers_cn/edit_distance_cn.rst
layers_cn/elementwise_add_cn.rst
@@ -108,7 +110,6 @@ fluid.layers
layers_cn/exponential_decay_cn.rst
layers_cn/eye_cn.rst
layers_cn/fc_cn.rst
- layers_cn/fill_constant_batch_size_like_cn.rst
layers_cn/fill_constant_cn.rst
layers_cn/filter_by_instag_cn.rst
layers_cn/flatten_cn.rst
@@ -117,7 +118,6 @@ fluid.layers
layers_cn/gather_cn.rst
layers_cn/gather_nd_cn.rst
layers_cn/gather_tree_cn.rst
- layers_cn/gaussian_random_batch_size_like_cn.rst
layers_cn/gaussian_random_cn.rst
layers_cn/gelu_cn.rst
layers_cn/generate_mask_labels_cn.rst
@@ -126,6 +126,7 @@ fluid.layers
layers_cn/get_tensor_from_selected_rows_cn.rst
layers_cn/greater_equal_cn.rst
layers_cn/greater_than_cn.rst
+ layers_cn/GreedyEmbeddingHelper_cn.rst
layers_cn/grid_sampler_cn.rst
layers_cn/group_norm_cn.rst
layers_cn/gru_unit_cn.rst
@@ -244,6 +245,7 @@ fluid.layers
layers_cn/rsqrt_cn.rst
layers_cn/RNNCell_cn.rst
layers_cn/sampled_softmax_with_cross_entropy_cn.rst
+ layers_cn/SampleEmbeddingHelper_cn.rst
layers_cn/sampling_id_cn.rst
layers_cn/scale_cn.rst
layers_cn/scatter_cn.rst
@@ -310,10 +312,10 @@ fluid.layers
layers_cn/thresholded_relu_cn.rst
layers_cn/topk_cn.rst
layers_cn/transpose_cn.rst
+ layers_cn/TrainingHelper_cn.rst
layers_cn/unfold_cn.rst
layers_cn/Uniform_cn.rst
layers_cn/uniform_random_cn.rst
- layers_cn/uniform_random_batch_size_like_cn.rst
layers_cn/unique_cn.rst
layers_cn/unique_with_counts_cn.rst
layers_cn/unsqueeze_cn.rst
diff --git a/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst b/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dd3820d852961be4903b9e7b6f0f10ca1eac35b8
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/BasicDecoder_cn.rst
@@ -0,0 +1,80 @@
+.. _cn_api_fluid_layers_BasicDecoder:
+
+BasicDecoder
+-------------------------------
+
+
+.. py:class:: paddle.fluid.layers.BasicDecoder(cell, helper, output_fn=None)
+
+BasicDecoder是 :ref:`cn_api_fluid_layers_Decoder` 的子类,它组装了 :ref:`cn_api_fluid_layers_RNNCell` 和 :ref:`cn_api_fluid_layers_DecodeHelper` 的实例作为成员,其中DecodeHelper用来实现不同的解码策略。它依次执行以下步骤来完成单步解码:
+
+1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。
+
+2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。
+
+3. 执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。
+
+参数:
+ - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。
+ - **helper** (DecodeHelper) - DecodeHelper的实例。
+ - **output_fn** (可选) - 处理cell输出的接口,在采样之前使用。默认值None。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import paddle.fluid.layers as layers
+
+ start_tokens = fluid.data(name="start_tokens",
+ shape=[None],
+ dtype="int64")
+
+ trg_embeder = lambda x: fluid.embedding(
+ x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
+ output_layer = lambda x: layers.fc(x,
+ size=10000,
+ num_flatten_dims=len(x.shape) - 1,
+ param_attr=fluid.ParamAttr(name=
+ "output_w"),
+ bias_attr=False)
+ helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1)
+ decoder_cell = layers.GRUCell(hidden_size=128)
+ decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer)
+ outputs = layers.dynamic_decode(
+ decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens))
+
+.. py:method:: initialize(initial_cell_states)
+
+初始化,包括helper的初始化和cell的初始化,cell初始化直接使用 :code:`initial_cell_states` 作为结果。
+
+参数:
+ - **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。这是由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供的参数。
+
+返回::code:`(initial_inputs, initial_states, finished)` 的三元组。 :code:`initial_inputs, initial_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`finished` 是bool类型的tensor。 :code:`initial_inputs, finished` 与 :code:`helper.initialize()` 返回的内容相同; :code:`initial_states` 与输入参数中的 :code:`initial_cell_states` 的相同。
+
+返回类型:tuple
+
+.. py:class:: OutputWrapper(cell_outputs, sample_ids)
+
+ :code:`step()` 的返回值中 :code:`outputs` 使用的数据结构,是一个由 :code:`cell_outputs` 和 :code:`sample_ids` 这两个字段构成的命名元组。
+
+.. py:method:: step(time, inputs, states, **kwargs)
+
+按照以下步骤执行单步解码:
+
+1. 执行 :code:`cell_outputs, cell_states = cell.call(inputs, states)` 以获取输出和新的状态。
+
+2. 执行 :code:`sample_ids = helper.sample(time, cell_outputs, cell_states)` 以采样id并将其作为当前步的解码结果。
+
+3. 执行 :code:`finished, next_inputs, next_states = helper.next_inputs(time, cell_outputs, cell_states, sample_ids)` 以产生下一解码步的结束标识、输入和状态。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。
+ - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_states` 相同。
+ - **kwargs** - 附加的关键字参数,由调用者 :ref:`cn_api_fluid_layers_dynamic_decode` 提供。
+
+返回: :code:`(outputs, next_states, next_inputs, finished)` 的四元组。 :code:`outputs` 是包含 :code:`cell_outputs` 和 :code:`sample_ids` 两个字段的命名元组,其中 :code:`cell_outputs` 是 :code:`cell.call()` 的结果, :code:`sample_ids` 是 :code:`helper.sample()` 的结果; :code:`next_states, next_inputs` 分别和输入参数中的 :code:`states, inputs` 有相同的结构、形状和数据类型; :code:`finished` 是一个bool类型的tensor,形状是 :math:`[batch\_size]` 。
+
+返回类型:tuple
diff --git a/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst b/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst
index 45c372bc2d13ad6a17ffde72d07d7bfc00087e34..d62d05ae86bda97df4fe06e328653df5251db4cd 100644
--- a/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/BeamSearchDecoder_cn.rst
@@ -4,9 +4,12 @@ BeamSearchDecoder
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.BeamSearchDecoder(cell, start_token, end_token, beam_size, embedding_fn=None, output_fn=None)
+
+:api_attr: 声明式编程模式(静态图)
+
+
带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search <https://en.wikipedia.org/wiki/Beam_search>`_
@@ -17,7 +20,7 @@ BeamSearchDecoder
- **start_token** (int) - 起始标记id。
- **end_token** (int) - 结束标记id。
- **beam_size** (int) - 在beam search中使用的beam宽度。
- - **embedding_fn** (可选) - 处理选中的候选id的接口。通常,它是一个将词id转换为词嵌入的嵌入层,函数的返回值作为 :code:`cell.call` 接口的 :code:`input` 参数。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。
+ - **embedding_fn** (可选) - 处理选中的候选id的接口。它通常是一个将词id转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size, beam\_size]` ,如果使用后者则还需要在这里提供unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。
- **output_fn** (可选) - 处理cell输出的接口,在计算得分和选择候选标记id之前使用。默认值None。
**示例代码**
@@ -82,8 +85,7 @@ BeamSearchDecoder
此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的tensor t,由minibatch中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个minibatch中的样本重复 :math:`beam\_size` 次。
参数:
- - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示对数概率。其数据类型应为float32。
- - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的tensor,表示所有beam的完成状态。其数据类型应为bool。
+ - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tensor。数据类型应为float32,float64,int32,int64或bool。
返回:具有与 :code:`x` 相同的形状和数据类型的tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的tensor(tensor中所有概率质量被分配给EOS标记)。
@@ -121,7 +123,7 @@ BeamSearchDecoder
参数:
- **initial_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。调用者提供的参数。
-返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,由 :code:`start_token` 填充,形状为 :math:`[batch\_size,beam\_size,1]` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。
+返回:一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个tensor,当 :code:`embedding_fn` 为None时,该tensor t的形状为 :math:`[batch\_size,beam\_size]` ,值为 :code:`start_token` ;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。
返回类型:tuple
@@ -133,7 +135,7 @@ BeamSearchDecoder
- **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
- **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的tensor,表示当前时间步的logits。其数据类型为float32。
- **next_cell_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。
- - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`initialize()` 返回的 :code:`beam_search_state` 相同。
+ - **beam_state** (Variable) - tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。
返回:一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。
@@ -144,9 +146,9 @@ BeamSearchDecoder
执行beam search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。
参数:
- - **time** (Variable) - 调用者提供的形状为[1]的int64tensor,表示当前解码的时间步长。
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
- **inputs** (Variable) - tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。
- - **States** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。
+ - **states** (Variable) - tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。
- **kwargs** - 附加的关键字参数,由调用者提供。
返回:一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。 :code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。 :code:`beam_search_output` 是tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的tensor,形状为 :math:`[batch\_size,beam\_size]`。
@@ -165,12 +167,3 @@ BeamSearchDecoder
返回:一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。
返回类型:tuple
-
-.. py:method:: output_dtype()
-
-用于beam search输出的数据类型的嵌套结构。它是一个命名元组,字段包括 :code:`scores, predicted_ids, parent_ids`。
-
-参数:无。
-
-返回:用于beam search输出的数据类型的命名元组。
-
diff --git a/doc/fluid/api_cn/layers_cn/Categorical_cn.rst b/doc/fluid/api_cn/layers_cn/Categorical_cn.rst
index 7cf79dbbf9e9d19ffc4997e0cbadf52207bfe156..9265a666d8460b9b37679ae73f1e5a653e3576f3 100644
--- a/doc/fluid/api_cn/layers_cn/Categorical_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Categorical_cn.rst
@@ -5,6 +5,9 @@ Categorical
.. py:class:: paddle.fluid.layers.Categorical(logits)
+
+
+
类别分布是一种离散概率分布,其随机变量可以取K个相互独立类别的其中一个。
概率质量函数(pmf)为:
diff --git a/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst b/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..14ad49c37982245c138bb04b7377d9b40edc6fa1
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/DecodeHelper_cn.rst
@@ -0,0 +1,44 @@
+.. _cn_api_fluid_layers_DecodeHelper:
+
+DecodeHelper
+-------------------------------
+
+
+.. py:class:: paddle.fluid.layers.DecodeHelper()
+
+DecodeHelper是一个基类,其子类的实例将在 :ref:`cn_api_fluid_layers_BasicDecoder` 中使用。它提供了在动态解码时采样和产生下一解码步的输入的接口。
+
+.. py:method:: initialize()
+
+初始化以产生第一个解码步的输入和每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。
+
+返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。
+
+返回类型:tuple
+
+.. py:method:: sample(time, outputs, states)
+
+根据 :code:`outputs` 以特定的方式进行采样,该方法是 :code:`BasicDecoder.step` 中的一部分。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+
+返回:数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。
+
+返回类型:Variable
+
+.. py:method:: next_inputs(time, outputs, states, sample_ids)
+
+产生下一解码步的输入、状态,以及每个序列是否结束的标识。该方法是 :code:`BasicDecoder.step` 中的一部分。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+ - **sample_ids** (Variable) - 数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。
+
+返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构, :code:`next_states` 和输入参数中的 :code:`states` 具有相同的结构、形状和数据类型; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。
+
+返回类型:tuple
diff --git a/doc/fluid/api_cn/layers_cn/Decoder_cn.rst b/doc/fluid/api_cn/layers_cn/Decoder_cn.rst
index 893119253f16ceb445cdbcac726a054a8e5e8ad3..ffe67dc97342f0ef561d0350c38806ed8bd15ce5 100644
--- a/doc/fluid/api_cn/layers_cn/Decoder_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Decoder_cn.rst
@@ -4,10 +4,13 @@ Decoder
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.Decoder()
+:api_attr: 声明式编程模式(静态图)
+
+
+
Decoder是dynamic_decode中使用的任何decoder实例的基类。它提供了为每一个时间步生成输出的接口,可用于生成序列。
@@ -36,13 +39,28 @@ Decoder提供的主要抽象为:
返回类型:tuple
-.. py:method:: step(time, inputs, states)
+.. py:method:: step(time, inputs, states, **kwargs)
在解码的每个时间步中被调用的接口
参数:
- - **outputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 结构和数据类型与 :code:`output_dtype` 相同。 tensor堆叠所有时间步长的输出从而具有shape :math:`[time\_step,batch\_size,...]` ,由调用者完成。
- - **final_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 它是 :code:`decoder.step` 在最后一个解码步返回的 :code:`next_states`, 因此具有与任何时间步长的状态相同的结构,形状和数据类型。
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_states` 相同。
+ - **kwargs** - 附加的关键字参数,由调用者提供。
+
+返回:一个元组 :code:`(outputs, next_states, next_inputs, finished)` 。:code:`next_states` 和 :code:`next_inputs` 都是单个tensor变量或tensor变量组成的嵌套结构,且结构、形状和数据类型均分别与输入参数中的 :code:`states` 和 :code:`inputs` 相同。 :code:`outputs` 是单个tensor变量或tensor变量组成的嵌套结构。 :code:`finished` 是一个bool类型的tensor变量。
+
+返回类型:tuple
+
+.. py:method:: finalize(outputs, final_states, sequence_lengths)
+
+如果提供了实现,将在整个解码迭代结束后被执行一次。
+
+参数:
+ - **outputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 其中每个tensor的形状均为 :math:`[time\_step,batch\_size,...]` ,是将所有解码步中与其对应的输出进行堆叠的结果,这个过程由其调用者完成。
+ - **final_states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。 它是 :code:`decoder.step` 在最后一个解码步返回的 :code:`next_states`, 因此具有与任何时间步的状态相同的结构,形状和数据类型。
+ - **sequence_lengths** (Variable) - 数据类型为int64的tensor,表示每个解码序列的实际长度,由调用者提供。
返回:一个元组 :code:`(final_outputs, final_states)` 。:code:`final_outputs` 和 :code:`final_states` 都是单个tensor变量或tensor变量组成的嵌套结构。
diff --git a/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst b/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst
index 50f6f41880b7473dad630765958ecb0f368a7127..e082d6bf6e58467803050e426a8fa1cc1fcc9193 100644
--- a/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/DynamicRNN_cn.rst
@@ -3,10 +3,13 @@
DynamicRNN
===================
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.DynamicRNN(name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该类型的输入仅支持LoDTensor,如果您需要处理的输入数据是Tensor类型,
请使用StaticRNN( fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。**
diff --git a/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst b/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst
index 3acb79d737531d6f2648081c25336d636e119611..a714757be122b2c7dd2cd936f809725991807996 100644
--- a/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/GRUCell_cn.rst
@@ -3,9 +3,12 @@
GRUCell
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.GRUCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, dtype="float32", name="GRUCell")
+
+:api_attr: 声明式编程模式(静态图)
+
+
门控循环单元(Gated Recurrent Unit)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicGRUUnit` 包装,来让它可以应用于RNNCell。
@@ -35,7 +38,7 @@ GRUCell
.. code-block:: python
import paddle.fluid.layers as layers
- cell = layers.rnn.GRUCell(hidden_size=256)
+ cell = layers.GRUCell(hidden_size=256)
.. py:method:: call(inputs, states)
diff --git a/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a01e1ab3e575b54855d774e29057ffc2b7d04a8f
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/GreedyEmbeddingHelper_cn.rst
@@ -0,0 +1,74 @@
+.. _cn_api_fluid_layers_GreedyEmbeddingHelper:
+
+GreedyEmbeddingHelper
+-------------------------------
+
+
+.. py:class:: paddle.fluid.layers.GreedyEmbeddingHelper(embedding_fn, start_tokens, end_token)
+
+GreedyEmbeddingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它使用 :code:`argmax` 进行采样,并将采样结果送入embedding层,以此作为下一解码步的输入。
+
+参数:
+ - **embedding_fn** (callable) - 作用于 :code:`argmax` 结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。
+ - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。
+ - **end_token** (int) - 结束标记id。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import paddle.fluid.layers as layers
+
+ start_tokens = fluid.data(name="start_tokens",
+ shape=[None],
+ dtype="int64")
+
+ trg_embeder = lambda x: fluid.embedding(
+ x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
+ output_layer = lambda x: layers.fc(x,
+ size=10000,
+ num_flatten_dims=len(x.shape) - 1,
+ param_attr=fluid.ParamAttr(name=
+ "output_w"),
+ bias_attr=False)
+ helper = layers.GreedyEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1)
+ decoder_cell = layers.GRUCell(hidden_size=128)
+ decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer)
+ outputs = layers.dynamic_decode(
+ decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens))
+
+.. py:method:: initialize()
+
+GreedyEmbeddingHelper初始化,其使用构造函数中的 :code:`start_tokens` 作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。
+
+返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 同构造函数中的 :code:`start_tokens` ; :code:`initial_finished` 是一个bool类型、值为False的tensor,其形状和 :code:`start_tokens` 相同。
+
+返回类型:tuple
+
+.. py:method:: sample(time, outputs, states)
+
+使用 :code:`argmax` 根据 :code:`outputs` 进行采样。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+
+返回:数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。
+
+返回类型:Variable
+
+.. py:method:: next_inputs(time, outputs, states, sample_ids)
+
+对 :code:`sample_ids` 使用 :code:`embedding_fn` ,以此作为下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并通过判别 :code:`sample_ids` 是否得到 :code:`end_token`,依此产生每个序列是否结束的标识。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+ - **sample_ids** (Variable) - 数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。
+
+返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。
+
+返回类型:tuple
diff --git a/doc/fluid/api_cn/layers_cn/IfElse_cn.rst b/doc/fluid/api_cn/layers_cn/IfElse_cn.rst
index 295fd454fbcdde779b1f9c98ce69902a7857b724..2ba7ff3b9eef50d97ba80f5e323df16ce7d6e815 100644
--- a/doc/fluid/api_cn/layers_cn/IfElse_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/IfElse_cn.rst
@@ -3,10 +3,13 @@
IfElse
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.IfElse(cond, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该类用于实现IfElse分支控制功能, IfElse包含两个Block,true_block,false_block,IfElse会将满足True或False条件的数据分别放入不同的block运行。
cond是一个shape为[N, 1]、数据类型为bool的2-D tensor,表示输入数据对应部分的执行条件。
diff --git a/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst b/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst
index 09d21492757c836fe1e4799e586e6b6d7001a204..183dd4ff210000f8c9138d18d402d431d245dd86 100644
--- a/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/LSTMCell_cn.rst
@@ -4,9 +4,12 @@ LSTMCell
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.LSTMCell(hidden_size, param_attr=None, bias_attr=None, gate_activation=None, activation=None, forget_bias=1.0, dtype="float32", name="LSTMCell")
+
+:api_attr: 声明式编程模式(静态图)
+
+
长短期记忆单元(Long-Short Term Memory)。通过对 :code:`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` 包装,来让它可以应用于RNNCell。
@@ -38,7 +41,7 @@ LSTMCell
.. code-block:: python
import paddle.fluid.layers as layers
- cell = layers.rnn.LSTMCell(hidden_size=256)
+ cell = layers.LSTMCell(hidden_size=256)
.. py:method:: call(inputs, states)
@@ -61,4 +64,4 @@ LSTMCell的 :code:`state_shape` 是一个具有两个形状的列表::math:`[[
返回:LSTMCell的 :code:`state_shape`
-返回类型:list
\ No newline at end of file
+返回类型:list
diff --git a/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst b/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst
index 9af102144b2ee832e04a282560034b8b487450c5..63e1d1bb2492d6f56c6ccbd7d7ada4505087ec25 100644
--- a/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/MultivariateNormalDiag_cn.rst
@@ -5,6 +5,9 @@ MultivariateNormalDiag
.. py:class:: paddle.fluid.layers.MultivariateNormalDiag(loc, scale)
+
+
+
多元高斯分布
概率密度函数(pdf)为:
diff --git a/doc/fluid/api_cn/layers_cn/Normal_cn.rst b/doc/fluid/api_cn/layers_cn/Normal_cn.rst
index d33e568d52bd2b6d2d827cf8bf24216a86474270..ce50e67bd12563ee7c24b6ab4141acf0ccf0c303 100644
--- a/doc/fluid/api_cn/layers_cn/Normal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Normal_cn.rst
@@ -5,6 +5,9 @@ Normal
.. py:class:: paddle.fluid.layers.Normal(loc, scale)
+
+
+
正态分布
数学公式:
@@ -31,7 +34,7 @@ Normal
import numpy as np
from paddle.fluid import layers
- from paddle.fluid.layers import Normal
+ from paddle.fluid.layers import Normal
# 定义参数为float的正态分布。
dist = Normal(loc=0., scale=3.)
diff --git a/doc/fluid/api_cn/layers_cn/Print_cn.rst b/doc/fluid/api_cn/layers_cn/Print_cn.rst
index 1851dd7145a80cb06d4d37f5098d891e5fe39718..422bab77d425f81f2097ef9e297a7cf50335b1d6 100644
--- a/doc/fluid/api_cn/layers_cn/Print_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Print_cn.rst
@@ -3,10 +3,13 @@
Print
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.Print(input, first_n=-1, message=None, summarize=20, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, print_tensor_lod=True, print_phase='both')
+:api_attr: 声明式编程模式(静态图)
+
+
+
**Print操作命令**
该OP创建一个打印操作,打印正在访问的Tensor内容。
diff --git a/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst b/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst
index d00a275f949673afa1392a1d5631fdef45451be6..1368e2ac33f57a483ced44c49ccf65aa83671f7a 100644
--- a/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/RNNCell_cn.rst
@@ -4,9 +4,12 @@ RNNCell
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.RNNCell(name=None)
+
+:api_attr: 声明式编程模式(静态图)
+
+
RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态的计算,主要用于RNN。
.. py:method:: call(inputs, states, **kwargs)
@@ -18,11 +21,11 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态
- **states** - 状态,单个tensor变量或tensor变量组成的嵌套结构。
- **kwargs** - 附加的关键字参数,由调用者提供。
-返回:输出和新状态。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。
+返回:包含输出和新状态的二元组 :code:`(outputs,new_states)` 。输出和新状态都可以是嵌套的tensor变量。新状态必须具有与状态相同的结构。
返回类型:tuple
-.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0)
+.. py:method:: get_initial_states(batch_ref, shape=None, dtype=None, init_value=0, batch_dim_idx=0)
该接口根据提供的形状,数据类型和初始值来初始化状态。
@@ -31,6 +34,7 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态
- **shape** - 单个形状或形状组成的嵌套结构,单个形状是整数的列表或元组。 如果形状的第一维不是batch大小,则自动插入-1作为batch大小。 如果该项为None,将使用属性 :code:`state_shape`。默认值为None。
- **dtype** - 单个数据类型或由数据类型组成的嵌套结构。该结构必须与shape的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 如果是None并且属性 :code:`cell.state_shape` 不可用,则float32将用作数据类型。 默认值为None。
- **init_value** - 用于初始化状态的浮点值。
+ - **batch_dim_idx** - 用于指示 :code:`batch_ref` 中batch所在维度的int值,默认值为0。
返回:和shape具有相同结构的tensor变量,代表初始状态。
@@ -38,9 +42,9 @@ RNNCell是抽象的基类,代表将输入和状态映射到输出和新状态
.. py:method:: state_shape()
-该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。
+抽象方法(属性),该接口用于初始化cell的状态。 单个形状或由形状组成的嵌套结构,单个形状可以是整数的列表或元组(如果形状的第一维不是batch大小,则自动插入-1作为batch大小)。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`shape` 参数的时候,不用实现该方法。
.. py:method:: state_dtype()
-该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。
+抽象方法(属性),该接口用于初始化cell的状态。 单个数据类型或由数据类型组成的嵌套结构,该结构必须与 :code:`shape` 的结构相同,例外是当状态中的所有tensor都具有相同的数据类型,这时可以使用单个数据类型。 当没有使用 :code:`get_initial_states` 初始化状态或 :code:`get_initial_states` 没有提供 :code:`dtype` 参数的时候,不用实现该方法。
diff --git a/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c38b80052fe9040d84d3ed3ba353e6e02cfe5a9c
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/SampleEmbeddingHelper_cn.rst
@@ -0,0 +1,54 @@
+.. _cn_api_fluid_layers_SampleEmbeddingHelper:
+
+SampleEmbeddingHelper
+-------------------------------
+
+
+.. py:class:: paddle.fluid.layers.SampleEmbeddingHelper(embedding_fn, start_tokens, end_token, softmax_temperature=None, seed=None)
+
+SampleEmbeddingHelper是 :ref:`cn_api_fluid_layers_GreedyEmbeddingHelper` 的子类。作为解码helper,它通过采样(而非使用 :code:`argmax` )得到id,并将采样结果送入embedding层,以此作为下一解码步的输入。
+
+参数:
+ - **embedding_fn** (callable) - 作用于采样结果的函数,通常是一个将词id转换为词嵌入的embedding层,**注意** ,这里要使用 :ref:`cn_api_fluid_embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size]` ,如果使用后者则还需要在这里提供unsqueeze。
+ - **start_tokens** (Variable) - 形状为 :math:`[batch\_size]` 、数据类型为int64、 值为起始标记id的tensor。
+ - **end_token** (int) - 结束标记id。
+ - **softmax_temperature** (float,可选) - 该值用于在softmax计算前除以logits。温度越高(大于1.0)随机性越大,温度越低则越趋向于argmax。该值必须大于0,默认值None等同于1.0。
+ - **seed** (int,可选) - 采样使用的随机种子。默认为None,表示不使用固定的随机种子。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import paddle.fluid.layers as layers
+
+ start_tokens = fluid.data(name="start_tokens",
+ shape=[None],
+ dtype="int64")
+
+ trg_embeder = lambda x: fluid.embedding(
+ x, size=[10000, 128], param_attr=fluid.ParamAttr(name="trg_embedding"))
+ output_layer = lambda x: layers.fc(x,
+ size=10000,
+ num_flatten_dims=len(x.shape) - 1,
+ param_attr=fluid.ParamAttr(name=
+ "output_w"),
+ bias_attr=False)
+ helper = layers.SampleEmbeddingHelper(trg_embeder, start_tokens=start_tokens, end_token=1)
+ decoder_cell = layers.GRUCell(hidden_size=128)
+ decoder = layers.BasicDecoder(decoder_cell, helper, output_fn=output_layer)
+ outputs = layers.dynamic_decode(
+ decoder=decoder, inits=decoder_cell.get_initial_states(start_tokens))
+
+.. py:method:: sample(time, outputs, states)
+
+根据一个多项分布进行采样,此分布由 :code:`softmax(outputs/softmax_temperature)` 计算得到。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+
+返回:数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。
+
+返回类型:Variable
diff --git a/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst b/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst
index 013ee3b9962a556acf5682d54ab6541dde961c41..ee524611b8f70b678c80d24a89728b9af20da90f 100644
--- a/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/StaticRNN_cn.rst
@@ -3,10 +3,13 @@
StaticRNN
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.StaticRNN(name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP用来处理一批序列数据,其中每个样本序列的长度必须相等。StaticRNN将序列按照时间步长展开,用户需要定义每个时间步中的处理逻辑。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/Switch_cn.rst b/doc/fluid/api_cn/layers_cn/Switch_cn.rst
index 67e2ea544fefe2b814e91628b226f45779c6a4c0..e4d132861ae84f5d6ba6befc42a7b6811aed4d40 100644
--- a/doc/fluid/api_cn/layers_cn/Switch_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Switch_cn.rst
@@ -3,10 +3,13 @@
Switch
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.Switch (name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该类用于实现Switch分支控制功能。Switch分支包含多个case分支和一个default分支,Switch控制流会依次检查各case分支条件是否满足,并仅执行第一个满足条件的case分支后面的语句。若不存在满足条件的case分支,则仅执行default分支后面的语句。
.. note::
diff --git a/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst b/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d140dbf8ac61370b3a0c7a33a50f56c378e4929
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/TrainingHelper_cn.rst
@@ -0,0 +1,70 @@
+.. _cn_api_fluid_layers_TrainingHelper:
+
+TrainingHelper
+-------------------------------
+
+
+.. py:class:: paddle.fluid.layers.TrainingHelper(inputs, sequence_length, time_major=False)
+
+TrainingHelper是 :ref:`cn_api_fluid_layers_DecodeHelper` 的子类。作为解码helper,它在每个解码时间步通过在完整序列输入 :code:`inputs` 的相应位置切片作为各步的输入,并且使用 :code:`argmax` 根据 :code:`cell.call()` 的输出进行采样。
+由于要求有完整的序列输入 :code:`inputs` ,TrainingHelper主要用于以teacher forcing的方式进行最大似然训练,采样得到的内容通常不会使用。
+
+参数:
+ - **inputs** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构。当 :code:`time_major == False` 时,tensor的形状应为 :math:`[batch\_size, sequence\_length, ...]`;当 :code:`time_major == True` 时,tensor的形状应为 :math:`[sequence\_length, batch\_size, ...]`。在解码的每一步都要从中切片取出相应的数据。
+ - **sequence_length** (Variable) - 形状为 :math:`[batch\_size]` 的tensor。它存储了 :code:`inputs` 中每个样本的实际长度,可以据此来标识每个解码步中每个样本是否结束。
+ - **time_major** (bool,可选) - 指示输入tensor和输出tensor中包含的tensor的数据组织。如果为False,则数据组织为batch为主,形状为 :math:`[batch\_size,sequence\_length,...]`。如果为True,则数据组织为time为主,形状为 :math:`[sequence\_length,batch\_size,...]`。默认值:False。
+
+**示例代码**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import paddle.fluid.layers as layers
+ trg_emb = fluid.data(name="trg_emb",
+ shape=[None, None, 128],
+ dtype="float32")
+ trg_seq_length = fluid.data(name="trg_seq_length",
+ shape=[None],
+ dtype="int64")
+ helper = layers.TrainingHelper(trg_emb, trg_seq_length)
+ decoder_cell = layers.GRUCell(hidden_size=128)
+ decoder = layers.BasicDecoder(decoder_cell, helper)
+ outputs = layers.dynamic_decode(
+ decoder,
+ inits=decoder_cell.get_initial_states(trg_emb),
+ is_test=False)
+
+.. py:method:: initialize()
+
+TrainingHelper初始化,其通过在完整序列输入 :code:`inputs` 中首个时间步的位置上切片,以此作为第一个解码步的输入,并给出每个序列是否结束的初始标识。这是 :ref:`cn_api_fluid_layers_BasicDecoder` 初始化的一部分。
+
+返回::code:`(initial_inputs, initial_finished)` 的二元组, :code:`initial_inputs` 是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` 。 :code:`initial_finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。
+
+返回类型:tuple
+
+.. py:method:: sample(time, outputs, states)
+
+使用 :code:`argmax` 根据 :code:`outputs` 进行采样。由于使用完整序列中的切片作为下一解码步的输入,采样得到的内容通常不会使用。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+
+返回:数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,表示采样得到的id。
+
+返回类型:Variable
+
+.. py:method:: next_inputs(time, outputs, states, sample_ids)
+
+从完整序列输入中当前时间步对应的位置上切片,以此产生下一解码步的输入;同时直接使用输入参数中的 :code:`states` 作为下一解码步的状态;并比较当前时间步与每个序列的长度,依此产生每个序列是否结束的标识。
+
+参数:
+ - **time** (Variable) - 调用者提供的形状为[1]的tensor,表示当前解码的时间步长。其数据类型为int64。
+ - **outputs** (Variable) - tensor变量,通常其数据类型为float32或float64,形状为 :math:`[batch\_size, vocabulary\_size]` ,表示当前解码步预测产生的logit(未归一化的概率),和由 :code:`BasicDecoder.output_fn(BasicDecoder.cell.call())` 返回的 :code:`outputs` 是同一内容。
+ - **states** (Variable) - 单个tensor变量或tensor变量组成的嵌套结构,和由 :code:`BasicDecoder.cell.call()` 返回的 :code:`new_states` 是同一内容。
+ - **sample_ids** (Variable) - 数据类型为int64、形状为 :math:`[batch\_size]` 的tensor,和由 :code:`sample()` 返回的 :code:`sample_ids` 是同一内容。
+
+返回: :code:`(finished, next_inputs, next_states)` 的三元组。 :code:`next_inputs, next_states` 均是单个tensor变量或tensor变量组成的嵌套结构,tensor的形状是 :math:`[batch\_size, ...]` , :code:`next_states` 和输入参数中的 :code:`states` 相同; :code:`finished` 是一个bool类型且形状为 :math:`[batch\_size]` 的tensor。
+
+返回类型:tuple
diff --git a/doc/fluid/api_cn/layers_cn/Uniform_cn.rst b/doc/fluid/api_cn/layers_cn/Uniform_cn.rst
index 601b1a7b1933492b22059cf96007930dad098933..59e1544b3751afacf4002cfa859a6827df1de187 100644
--- a/doc/fluid/api_cn/layers_cn/Uniform_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/Uniform_cn.rst
@@ -5,6 +5,9 @@ Uniform
.. py:class:: paddle.fluid.layers.Uniform(low, high)
+
+
+
均匀分布
概率密度函数(pdf)为:
diff --git a/doc/fluid/api_cn/layers_cn/While_cn.rst b/doc/fluid/api_cn/layers_cn/While_cn.rst
index 74d4da040a40c77ce90d519ca2157c31f23de7b9..0e851830bd2dfeca29306e43cae88acb9aa2b798 100644
--- a/doc/fluid/api_cn/layers_cn/While_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/While_cn.rst
@@ -3,10 +3,13 @@
While
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:class:: paddle.fluid.layers.While (cond, is_test=False, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该类用于实现while循环控制功能,只要循环条件cond为True,就循环执行while循环体中的语句,直到cond为False为止。
@@ -14,14 +17,18 @@ While
如果参数 ``cond`` 的形状为[1],强烈建议您使用新的OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。
OP :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``While`` 一样。
+**注意:**
+ 在 ``While`` 中创建的局部变量类似于 C++ 的 while 循环中定义的局部变量,无法被循环外部引用,因此无法通过 ``Executor`` 中的 ``fetch_list`` 来获取。
+ 若想实现该功能,PaddlePaddle提供了 ``assign`` 接口将局部变量赋值到外部,请参考下面的代码示例 2,或参考 `issue#22724 `_ 。
+
参数:
- **cond** (Variable) – 用于判断循环继续进行的条件,为数据类型bool型的Tensor,其shape必须为[1]。
- **is_test** (bool,可选) – 用于表明是否在测试阶段执行,默认值为False。
- **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
-**代码示例**
+**代码示例 1**
-.. code-block:: python
+.. code-block:: python
# 该示例代码展示整数循环+1,循环10次,输出计数结果
import paddle.fluid as fluid
@@ -44,7 +51,33 @@ While
print(res) # [array([10])]
+**代码示例 2**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ import numpy as np
+
+ i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+ loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+ one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
+ data = fluid.data(name='data', shape=[1], dtype='float32')
+ sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0) # 在 While 外先定义要获取的变量,需和要获取的 While 内部的变量名称不同
+ cond = fluid.layers.less_than(x=i, y=loop_len)
+ while_op = fluid.layers.While(cond=cond)
+ with while_op.block():
+ sums_tensor = fluid.layers.elementwise_add(x=data, y=data)
+ fluid.layers.assign(input=sums_tensor, output=sums) # 将 While 内定义的变量 sums_tensor 通过 layers.assign 更新至 While 外的变量 sums 中
+ i = fluid.layers.increment(x=i, value=1, in_place=True)
+ data = fluid.layers.elementwise_add(x=data, y=one)
+ fluid.layers.less_than(x=i, y=loop_len, cond=cond)
+
+ feed_data = np.ones([1]).astype('float32')
+ exe = fluid.Executor(fluid.CPUPlace())
+ exe.run(fluid.default_startup_program())
+ res = exe.run(fluid.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
+ print(res[0]) # [2.] # 因 While 内的 data 没有将值更新到 While 外,故循环过后此处 sums 的值为 [2.]
diff --git a/doc/fluid/api_cn/layers_cn/abs_cn.rst b/doc/fluid/api_cn/layers_cn/abs_cn.rst
index 754f65f3870fa11cfbc242339afa5d21eb0b843c..755477bb7a34a13226d90a7a2c421af3eb792bcf 100644
--- a/doc/fluid/api_cn/layers_cn/abs_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/abs_cn.rst
@@ -5,23 +5,33 @@ abs
.. py:function:: paddle.fluid.layers.abs(x, name=None)
-绝对值激活函数。
+:alias_main: paddle.abs
+:alias: paddle.abs,paddle.tensor.abs,paddle.tensor.math.abs
+:old_api: paddle.fluid.layers.abs
+
+
+
+绝对值函数。
.. math::
out = |x|
参数:
- - **x** (Variable)- 多维Tensor,数据类型为float32或float64。
- - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回:表示绝对值结果的Tensor,数据类型与x相同。
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
-返回类型:Variable
+返回类型:Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.data(name="input", shape=[32, 784])
- result = fluid.layers.abs(data)
+ import paddle
+ paddle.disable_static()
+
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.abs(x)
+ print(out.numpy())
+ # [0.4 0.2 0.1 0.3]
diff --git a/doc/fluid/api_cn/layers_cn/accuracy_cn.rst b/doc/fluid/api_cn/layers_cn/accuracy_cn.rst
index b83239c01adf66fe6694121bda5a947540de1a86..15c3d9efcb115065c8b41034ecc0d4703ffb1730 100755
--- a/doc/fluid/api_cn/layers_cn/accuracy_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/accuracy_cn.rst
@@ -5,6 +5,9 @@ accuracy
.. py:function:: paddle.fluid.layers.accuracy(input, label, k=1, correct=None, total=None)
+
+
+
accuracy layer。 参考 https://en.wikipedia.org/wiki/Precision_and_recall
使用输入和标签计算准确率。 如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和label的类型可以不一样。
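下面是一个简要的静态图示例(示意性质,网络结构为此处假设):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name="input", shape=[None, 32, 32], dtype="float32")
    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
    fc_out = fluid.layers.fc(input=data, size=10)
    predict = fluid.layers.softmax(input=fc_out)
    # 统计 label 是否落在 top-5 预测之中
    result = fluid.layers.accuracy(input=predict, label=label, k=5)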
diff --git a/doc/fluid/api_cn/layers_cn/acos_cn.rst b/doc/fluid/api_cn/layers_cn/acos_cn.rst
index 408b2ed70566f02ed4edcdc486b1a01671407154..288c3121081ac22005f6bd0926cd6ed42d4675a0 100644
--- a/doc/fluid/api_cn/layers_cn/acos_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/acos_cn.rst
@@ -5,29 +5,34 @@ acos
.. py:function:: paddle.fluid.layers.acos(x, name=None)
-arccosine激活函数。
+:alias_main: paddle.acos
+:alias: paddle.acos,paddle.tensor.acos,paddle.tensor.math.acos
+:old_api: paddle.fluid.layers.acos
+
+
+
+arccosine函数。
.. math::
out = cos^{-1}(x)
参数:
- - **x(Variable)** - acos的输入Tensor,数据类型为 float32 或 float64
- - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
-返回: `acos` 的输出Tensor,数据类型与 `x` 相同。
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回类型: Variable
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
+返回类型: Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.data(name="input", shape=[4])
- # if data is [-0.8183, 0.4912, -0.6444, 0.0371]
- result = fluid.layers.acos(data)
- # result is [2.5293, 1.0573, 2.2711, 1.5336]
-
-
+ import paddle
+ paddle.disable_static()
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.acos(x)
+ print(out.numpy())
+ # [1.98231317 1.77215425 1.47062891 1.26610367]
diff --git a/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst b/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst
index 8f4c38c6954fe90d614c1b72a3f04fef61a51f93..c2a5026955e2e2c1e5484ec1799ced2851921a88 100644
--- a/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/adaptive_pool2d_cn.rst
@@ -5,6 +5,12 @@ adaptive_pool2d
.. py:function:: paddle.fluid.layers.adaptive_pool2d(input, pool_size, pool_type='max', require_index=False, name=None)
+:alias_main: paddle.nn.functional.adaptive_pool2d
+:alias: paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d
+:old_api: paddle.fluid.layers.adaptive_pool2d
+
+
+
该OP使用上述输入参数的池化配置,为二维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。
输入X和输出Out是NCHW格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有两个整型元素, 分别代表高度和宽度上的参数。输出Out的H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1] \right )`
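下面是一个简要示例(示意性质,输入形状为此处假设),输出的 H、W 即为 ``pool_size`` 指定的大小:

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name="data", shape=[None, 3, 32, 32], dtype="float32")
    # 输出形状为 [N, 3, 3, 3]
    pool_out = fluid.layers.adaptive_pool2d(input=data,
                                            pool_size=[3, 3],
                                            pool_type="avg")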
diff --git a/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst b/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst
index 2b166e0449022c815454d28f4395733090dd9d0e..ec7c4d13210b745f6dba3bd307ab1587558c2535 100644
--- a/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/adaptive_pool3d_cn.rst
@@ -5,6 +5,12 @@ adaptive_pool3d
.. py:function:: paddle.fluid.layers.adaptive_pool3d(input, pool_size, pool_type='max', require_index=False, name=None)
+:alias_main: paddle.nn.functional.adaptive_pool3d
+:alias: paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d
+:old_api: paddle.fluid.layers.adaptive_pool3d
+
+
+
该OP使用上述输入参数的池化配置,为三维空间自适应池化操作,根据 ``input`` , 池化类型 ``pool_type`` , 池化核大小 ``pool_size`` 这些参数得到输出。
输入X和输出Out是NCDHW格式,N为批大小,C是通道数,D是特征深度,H是特征高度,W是特征宽度。参数 ``pool_size`` 含有三个整型元素, 分别代表深度、高度和宽度上的参数。输出Out的D, H和W维由 ``pool_size`` 决定,即输出shape为 :math:`\left ( N,C,pool_size[0],pool_size[1],pool_size[2] \right )`
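与上面描述对应的一个简要示例(示意性质,输入形状为此处假设):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name="data", shape=[None, 3, 32, 32, 32], dtype="float32")
    # 输出形状为 [N, 3, 3, 3, 3]
    pool_out = fluid.layers.adaptive_pool3d(input=data,
                                            pool_size=[3, 3, 3],
                                            pool_type="avg")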
diff --git a/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst b/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst
index 855acbfb1695606c7cd75156d9806316b69e918f..abba3377ba359ac496848262f1fe29705ea504b0 100644
--- a/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/add_position_encoding_cn.rst
@@ -5,6 +5,12 @@ add_position_encoding
.. py:function:: paddle.fluid.layers.add_position_encoding(input, alpha, beta, name=None)
+:alias_main: paddle.nn.functional.add_position_encoding
+:alias: paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding
+:old_api: paddle.fluid.layers.add_position_encoding
+
+
+
该OP将输入input中每个位置(序列中的位置)的特征与对应的位置编码加权求和,位置编码可参考论文: `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
输出的计算公式如下:
@@ -34,14 +40,13 @@ add_position_encoding
.. code-block:: python
- import paddle.fluid as fluid
-
- tensor = fluid.layers.data(
+ import paddle.fluid as fluid
+
+ tensor = fluid.data(
name='tensor',
- shape=[32, 64, 512],
- dtype='float32',
- append_batch_size=False)
- position_tensor = fluid.layers.add_position_encoding(
+ shape=[None, 64, 512],
+ dtype='float32')
+ position_tensor = fluid.layers.add_position_encoding(
input=tensor, alpha=1.0, beta=1.0)
@@ -53,4 +58,3 @@ add_position_encoding
-
diff --git a/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst b/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst
index 1d2fafaf639c834cede7b3136111dd50c9420d0e..c810d489a98304681ab230606d87b13bfed49ca4 100644
--- a/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/affine_channel_cn.rst
@@ -5,6 +5,12 @@ affine_channel
.. py:function:: paddle.fluid.layers.affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None,act=None)
+:alias_main: paddle.nn.functional.affine_channel
+:alias: paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel
+:old_api: paddle.fluid.layers.affine_channel
+
+
+
对输入的每个 channel 应用单独的仿射变换。用于将空间批量归一化替换为其等价的固定变换。
输入也可以是二维张量,并在第二维应用仿射变换。
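下面是一个简要示例(示意性质,scale、bias 的初始化方式为此处假设),对每个 channel 施加 out = x * scale + bias 的变换:

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name="data", shape=[None, 3, 32, 32], dtype="float32")
    scale = fluid.layers.create_parameter(
        shape=[3], dtype="float32",
        default_initializer=fluid.initializer.Constant(2.0))
    bias = fluid.layers.create_parameter(
        shape=[3], dtype="float32",
        default_initializer=fluid.initializer.Constant(0.5))
    # 每个 channel 分别计算 out = data * scale + bias
    out = fluid.layers.affine_channel(data, scale=scale, bias=bias)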
diff --git a/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst b/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst
index 3f143f3942e3fb2f85142467479747a93ee3a13d..287116d9f5f6845eb0f874a78c329ca49676b14d 100644
--- a/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/affine_grid_cn.rst
@@ -5,6 +5,12 @@ affine_grid
.. py:function:: paddle.fluid.layers.affine_grid(theta, out_shape, name=None)
+:alias_main: paddle.nn.functional.affine_grid
+:alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid
+:old_api: paddle.fluid.layers.affine_grid
+
+
+
该OP用于生成仿射变换前后的feature maps的坐标映射关系。在视觉应用中,根据该OP得到的映射关系,将输入feature map的像素点变换到对应的坐标,就得到了经过仿射变换的feature map。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst b/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst
index 0b7c5269aabe417880b14857002bc81e97086784..ada0130cce84b1f94d57f60859b04f3b83d1d6ca 100644
--- a/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/anchor_generator_cn.rst
@@ -5,6 +5,12 @@ anchor_generator
.. py:function:: paddle.fluid.layers.anchor_generator(input, anchor_sizes=None, aspect_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], stride=None, offset=0.5, name=None)
+:alias_main: paddle.nn.functional.anchor_generator
+:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
+:old_api: paddle.fluid.layers.anchor_generator
+
+
+
**Anchor generator operator**
为RCNN算法生成anchor,输入的每个位置产生N个anchor,N=size(anchor_sizes)*size(aspect_ratios)。生成anchor的顺序首先是aspect_ratios循环,然后是anchor_sizes循环。
diff --git a/doc/fluid/api_cn/layers_cn/argmax_cn.rst b/doc/fluid/api_cn/layers_cn/argmax_cn.rst
index 47b6a49dec02eed0685b48064ce97c32af994cae..d165ce8d6997f060e53c2754239be83b6298ef2c 100644
--- a/doc/fluid/api_cn/layers_cn/argmax_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/argmax_cn.rst
@@ -5,6 +5,9 @@ argmax
.. py:function:: paddle.fluid.layers.argmax(x, axis=0)
+
+
+
**argmax**
该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。
diff --git a/doc/fluid/api_cn/layers_cn/argmin_cn.rst b/doc/fluid/api_cn/layers_cn/argmin_cn.rst
index 885515bd30dc3c364718fd6b2ed1ab1cc6abf261..74ba5fbc52a2cc285cd9d1a370246e028ec0b14c 100644
--- a/doc/fluid/api_cn/layers_cn/argmin_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/argmin_cn.rst
@@ -5,6 +5,12 @@ argmin
.. py:function:: paddle.fluid.layers.argmin(x, axis=0)
+:alias_main: paddle.argmin
+:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
+:old_api: paddle.fluid.layers.argmin
+
+
+
**argmin**
该OP沿 ``axis`` 计算输入 ``x`` 的最小元素的索引。
diff --git a/doc/fluid/api_cn/layers_cn/argsort_cn.rst b/doc/fluid/api_cn/layers_cn/argsort_cn.rst
index cc35109750567a42d5069f3d6d120ce8410b3066..a681b3beefadc7c601a42a23a958792b5bac9939 100644
--- a/doc/fluid/api_cn/layers_cn/argsort_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/argsort_cn.rst
@@ -5,11 +5,17 @@ argsort
.. py:function:: paddle.fluid.layers.argsort(input,axis=-1,descending=False,name=None)
+:alias_main: paddle.argsort
+:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
+:old_api: paddle.fluid.layers.argsort
+
+
+
对输入变量沿给定轴进行排序,输出排序好的数据和相应的索引,其维度和输入相同。**默认升序排列,如果需要降序排列设置** ``descending=True`` 。
参数:
- - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64。
+ - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int16、int32、int64、uint8。
- **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为-1。
- **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。
- **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
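
A small imperative-mode sketch of the sort-with-indices behaviour described above (the input values are made up for the example):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.array([[5, 8, 9, 5], [0, 0, 1, 7]], dtype='float32'))
        # sorted_out holds the ascending values, indices their original positions
        sorted_out, indices = fluid.layers.argsort(input=x, axis=-1)
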
diff --git a/doc/fluid/api_cn/layers_cn/array_length_cn.rst b/doc/fluid/api_cn/layers_cn/array_length_cn.rst
index b3e94b1b000a6fa541fd7a8b0c901d2d351b8e1c..27fcaf003b8dab728cded431ae24e525d9e4cb47 100644
--- a/doc/fluid/api_cn/layers_cn/array_length_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/array_length_cn.rst
@@ -5,6 +5,9 @@ array_length
.. py:function:: paddle.fluid.layers.array_length(array)
+
+
+
该OP用于获取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 的长度。可以与 :ref:`cn_api_fluid_layers_array_read` 、 :ref:`cn_api_fluid_layers_array_write` 、 :ref:`cn_api_fluid_layers_While` OP结合使用,实现LoDTensorArray的遍历与读写。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/array_read_cn.rst b/doc/fluid/api_cn/layers_cn/array_read_cn.rst
index 1f5d5432a6b9584e189dade7d5bef758f19fd6d5..6e8cd4fb70ec2f31bbb93822883c0cfc491cb691 100644
--- a/doc/fluid/api_cn/layers_cn/array_read_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/array_read_cn.rst
@@ -5,6 +5,9 @@ array_read
.. py:function:: paddle.fluid.layers.array_read(array,i)
+
+
+
该OP用于读取输入数组 :ref:`cn_api_fluid_LoDTensorArray` 中指定位置的数据, ``array`` 为输入的数组, ``i`` 为指定的读取位置。常与 :ref:`cn_api_fluid_layers_array_write` OP配合使用进行LoDTensorArray的读写。
例1:
diff --git a/doc/fluid/api_cn/layers_cn/array_write_cn.rst b/doc/fluid/api_cn/layers_cn/array_write_cn.rst
index 532c950938e3d4fb17bf2bf20cfe7f0551a697ac..4f59d605093261366a4fea7d2e48dd9b4011158c 100644
--- a/doc/fluid/api_cn/layers_cn/array_write_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/array_write_cn.rst
@@ -5,6 +5,9 @@ array_write
.. py:function:: paddle.fluid.layers.array_write(x, i, array=None)
+
+
+
该OP将输入的变量 ``x`` 写入到数组 :ref:`cn_api_fluid_LoDTensorArray` ``array`` 的第i个位置,并返回修改后的LoDTensorArray,如果 ``array`` 为None,则创建一个新的LoDTensorArray。常与 :ref:`cn_api_fluid_layers_array_read` OP联合使用对LoDTensorArray进行读写。
参数:
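
array_write is usually combined with array_read and array_length (documented above); a minimal sketch, with arbitrary example values:

.. code-block:: python

    import paddle.fluid as fluid

    # a tensor to store and the position to write it at
    x = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)

    arr = fluid.layers.array_write(x, i=i)     # array=None, so a new LoDTensorArray is created
    item = fluid.layers.array_read(arr, i=i)   # read the element back from position 0
    length = fluid.layers.array_length(arr)    # length is 1
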
diff --git a/doc/fluid/api_cn/layers_cn/asin_cn.rst b/doc/fluid/api_cn/layers_cn/asin_cn.rst
index a7fff929d09a7dd5ed1d81615320248e2e620a90..7960b807a60d25f2f4bfb7e3b46695f99e706eac 100644
--- a/doc/fluid/api_cn/layers_cn/asin_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/asin_cn.rst
@@ -5,29 +5,33 @@ asin
.. py:function:: paddle.fluid.layers.asin(x, name=None)
-arcsine激活函数。
+:alias_main: paddle.asin
+:alias: paddle.asin,paddle.tensor.asin,paddle.tensor.math.asin
+:old_api: paddle.fluid.layers.asin
+
+
+
+arcsine函数。
.. math::
out = sin^{-1}(x)
-
参数:
- - **x(Variable)** - asin的输入Tensor,数据类型为 float32 或 float64
- - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回: `asin` 的输出Tensor,数据类型与 `x` 相同。
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
-返回类型: Variable
+返回类型: Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.data(name="input", shape=[4])
- # if data is [-0.8183, 0.4912, -0.6444, 0.0371]
- result = fluid.layers.asin(data)
- # result is [-0.9585, 0.5135, -0.7003, 0.0372]
-
-
+ import paddle
+ paddle.disable_static()
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.asin(x)
+ print(out.numpy())
+ # [-0.41151685 -0.20135792 0.10016742 0.30469265]
diff --git a/doc/fluid/api_cn/layers_cn/assign_cn.rst b/doc/fluid/api_cn/layers_cn/assign_cn.rst
index d01f0a2aaa328cabd84346e828a77cabe54f3a81..da7b3c8f146aa744735c464902f0d633364e8288 100644
--- a/doc/fluid/api_cn/layers_cn/assign_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/assign_cn.rst
@@ -5,6 +5,12 @@ assign
.. py:function:: paddle.fluid.layers.assign(input,output=None)
+:alias_main: paddle.nn.functional.assign
+:alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign
+:old_api: paddle.fluid.layers.assign
+
+
+
该OP将输入Tensor或numpy数组拷贝至输出Tensor。
参数:
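
A brief sketch of the two input kinds assign accepts, a Tensor and a numpy array (the values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64')
    result1 = fluid.layers.assign(data)  # copy an existing Tensor
    result2 = fluid.layers.assign(np.array([[1.0, 2.0]], dtype='float32'))  # copy a numpy array
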
diff --git a/doc/fluid/api_cn/layers_cn/atan_cn.rst b/doc/fluid/api_cn/layers_cn/atan_cn.rst
index 618151ec5f61f556c41f19417de2206d33b223bb..2b5b11b6f9ffa00fc6bb09520713b22439fea4cf 100644
--- a/doc/fluid/api_cn/layers_cn/atan_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/atan_cn.rst
@@ -5,30 +5,33 @@ atan
.. py:function:: paddle.fluid.layers.atan(x, name=None)
-arctanh激活函数。
+:alias_main: paddle.atan
+:alias: paddle.atan,paddle.tensor.atan,paddle.tensor.math.atan
+:old_api: paddle.fluid.layers.atan
+
+
+
+arctangent函数。
.. math::
- out = tanh^{-1}(x)
+ out = tan^{-1}(x)
参数:
- - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64
- - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回: `atan` 的输出Tensor,数据类型与 `x` 相同。
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
-返回类型: Variable
+返回类型: Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.data(name="input", shape=[4])
- # if data is [-0.8183, 0.4912, -0.6444, 0.0371]
- result = fluid.layers.atan(data)
- # result is [-0.6858, 0.4566, -0.5724, 0.0371]
-
-
-
-
+ import paddle
+ paddle.disable_static()
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.atan(x)
+ print(out.numpy())
+ # [-0.38050638 -0.19739556 0.09966865 0.29145679]
diff --git a/doc/fluid/api_cn/layers_cn/auc_cn.rst b/doc/fluid/api_cn/layers_cn/auc_cn.rst
index f276b1290771a21be33a7ec1f22eb2ec521b4e4a..e915875f5d306abdf7ebf51cc65a2cbdf66ca5de 100755
--- a/doc/fluid/api_cn/layers_cn/auc_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/auc_cn.rst
@@ -5,6 +5,9 @@ auc
.. py:function:: paddle.fluid.layers.auc(input, label, curve='ROC', num_thresholds=200, topk=1, slide_steps=1)
+
+
+
**Area Under the Curve(AUC) Layer**
该层根据前向输出和标签计算AUC,在二分类(binary classification)估计中广泛使用。
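
A minimal static-graph sketch of feeding a binary classifier's output into auc (the fc/softmax network here is an assumption for the example):

.. code-block:: python

    import paddle.fluid as fluid

    data = fluid.data(name='data', shape=[None, 32], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    predict = fluid.layers.fc(input=data, size=2, act='softmax')
    # result is a tuple; result[0] is the accumulated AUC variable
    result = fluid.layers.auc(input=predict, label=label)
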
diff --git a/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst b/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst
index e3e4768a01166a45f58d41eb6d53e1a3ad689e34..821b793f511e943ec53e253c1df108ac60286fea 100644
--- a/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/autoincreased_step_counter_cn.rst
@@ -3,10 +3,13 @@
autoincreased_step_counter
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.autoincreased_step_counter(counter_name=None, begin=1, step=1)
+:api_attr: 声明式编程模式(静态图)
+
+
+
创建一个自增变量,每个迭代累加一次,默认首次返回值为 1,默认累加步长为 1。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst b/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst
index 3d4699eb690296220eba86d07d5f5c0ca46e8087..95fa58257d7180f8a16176aa7b754f0b20124507 100644
--- a/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/batch_norm_cn.rst
@@ -3,10 +3,13 @@
batch_norm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
批正则化层(Batch Normalization Layer)
可用作卷积和全连接操作的批正则化函数,根据当前批次数据按通道计算的均值和方差进行正则化。该层需要的数据格式如下:
diff --git a/doc/fluid/api_cn/layers_cn/beam_search_cn.rst b/doc/fluid/api_cn/layers_cn/beam_search_cn.rst
index c30d45de27424ae751056cc2169b25280ad7e41f..36174f2a63b1dba8a02b7f34af9d2034e3363d6f 100644
--- a/doc/fluid/api_cn/layers_cn/beam_search_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/beam_search_cn.rst
@@ -5,6 +5,12 @@ beam_search
.. py:function:: paddle.fluid.layers.beam_search(pre_ids, pre_scores, ids, scores, beam_size, end_id, level=0, is_accumulated=True, name=None, return_parent_idx=False)
+:alias_main: paddle.nn.beam_search
+:alias: paddle.nn.beam_search,paddle.nn.decode.beam_search
+:old_api: paddle.fluid.layers.beam_search
+
+
+
束搜索(Beam search)是在机器翻译等生成任务中选择候选词的一种经典算法
更多细节参考 `Beam Search `_
diff --git a/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst b/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst
index 5f5f03be1f1a2792195e421d1cf3cf198e529399..a6b46142e1804180cae16c0ab99625fdafa3299c 100644
--- a/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/beam_search_decode_cn.rst
@@ -5,6 +5,12 @@ beam_search_decode
.. py:function:: paddle.fluid.layers.beam_search_decode(ids, scores, beam_size, end_id, name=None)
+:alias_main: paddle.nn.beam_search_decode
+:alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode
+:old_api: paddle.fluid.layers.beam_search_decode
+
+
+
该OP用在整个束搜索(Beam search)结束后,通过沿 ``ids`` 中保存的搜索路径回溯,为每个源句(样本)构造完整的beam search结果序列并保存在LoDTensor中。LoDTensor的格式和解析方式如下:
::
diff --git a/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst b/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst
index 2bf7ce633952f7647e6b8960d5f24e2c44a30e41..d65e2abdc70a6a109b69ae474ed64f49167ff37e 100644
--- a/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/bilinear_tensor_product_cn.rst
@@ -3,10 +3,13 @@
bilinear_tensor_product
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该层对两个输入执行双线性张量积。
例如:
diff --git a/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst b/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst
index a0a68904a79ee252d3db2d00a1a190fb9c7f52c7..23f2832d97d605b6a80af69ec8ceafbdc3ad9d40 100644
--- a/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/bipartite_match_cn.rst
@@ -5,6 +5,12 @@ bipartite_match
.. py:function:: paddle.fluid.layers.bipartite_match(dist_matrix, match_type=None, dist_threshold=None, name=None)
+:alias_main: paddle.nn.functional.bipartite_match
+:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
+:old_api: paddle.fluid.layers.bipartite_match
+
+
+
该OP实现了贪心二分匹配算法,该算法用于根据输入的距离矩阵获得距离最大的匹配。对于输入的二维矩阵,二分匹配算法可以找到每一行的匹配列(匹配意味着距离最大),也可以找到每列的匹配行。此算子仅计算列到行的匹配索引。对于每个实例,匹配索引的数量是
输入距离矩阵的列数。**该OP仅支持CPU**
diff --git a/doc/fluid/api_cn/layers_cn/box_clip_cn.rst b/doc/fluid/api_cn/layers_cn/box_clip_cn.rst
index 37d86c883774fe439202036c01ee682a4bb774b7..2b5b4ad767b52a49333a9aada0cfce187e71aa1a 100644
--- a/doc/fluid/api_cn/layers_cn/box_clip_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/box_clip_cn.rst
@@ -5,6 +5,12 @@ box_clip
.. py:function:: paddle.fluid.layers.box_clip(input, im_info, name=None)
+:alias_main: paddle.nn.functional.box_clip
+:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
+:old_api: paddle.fluid.layers.box_clip
+
+
+
将检测框框剪切为 ``im_info`` 给出的大小。对于每个输入框,公式如下:
::
diff --git a/doc/fluid/api_cn/layers_cn/box_coder_cn.rst b/doc/fluid/api_cn/layers_cn/box_coder_cn.rst
index a2e1dc4ddc2f9e1f0e36f890622cf3ba46f53d8c..eb991c1e53e93aa4bfa4b0aa512cf5d78465ee84 100644
--- a/doc/fluid/api_cn/layers_cn/box_coder_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/box_coder_cn.rst
@@ -5,6 +5,12 @@ box_coder
.. py:function:: paddle.fluid.layers.box_coder(prior_box, prior_box_var, target_box, code_type='encode_center_size', box_normalized=True, name=None, axis=0)
+:alias_main: paddle.nn.functional.box_coder
+:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
+:old_api: paddle.fluid.layers.box_coder
+
+
+
Bounding Box Coder
编码/解码带有先验框信息的目标边界框
diff --git a/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst b/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst
index 252a8c5bfb7d8092dbc9a864cbc07bca5436f73b..df65ef34c8563521040a6851646e4b23192f7424 100644
--- a/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/box_decoder_and_assign_cn.rst
@@ -5,6 +5,12 @@ box_decoder_and_assign
.. py:function:: paddle.fluid.layers.box_decoder_and_assign(prior_box, prior_box_var, target_box, box_score, box_clip, name=None)
+:alias_main: paddle.nn.functional.box_decoder_and_assign
+:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
+:old_api: paddle.fluid.layers.box_decoder_and_assign
+
+
+
边界框编码器。
根据先验框来解码目标边界框。
diff --git a/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst b/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst
index 44c38b30634802296af9f29eb45758858790bf62..06195e5198988de8eb5cbdaf2460ea20bc3482bb 100644
--- a/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/bpr_loss_cn.rst
@@ -5,6 +5,12 @@ bpr_loss
.. py:function:: paddle.fluid.layers.bpr_loss(input, label, name=None)
+:alias_main: paddle.nn.functional.bpr_loss
+:alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss
+:old_api: paddle.fluid.layers.bpr_loss
+
+
+
贝叶斯个性化排序损失函数(Bayesian Personalized Ranking Loss Operator )
diff --git a/doc/fluid/api_cn/layers_cn/brelu_cn.rst b/doc/fluid/api_cn/layers_cn/brelu_cn.rst
index 7f65f3e6474a0e7138ec4e5e7afac28d38633db0..0931ad4cea3cfd17ae8476e450762ed7b359c2c9 100644
--- a/doc/fluid/api_cn/layers_cn/brelu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/brelu_cn.rst
@@ -5,6 +5,12 @@ brelu
.. py:function:: paddle.fluid.layers.brelu(x, t_min=0.0, t_max=24.0, name=None)
+:alias_main: paddle.nn.functional.brelu
+:alias: paddle.nn.functional.brelu,paddle.nn.functional.activation.brelu
+:old_api: paddle.fluid.layers.brelu
+
+
+
BReLU 激活函数
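
A short imperative-mode sketch of the bounded ReLU described above (input values and thresholds are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.array([[-1.0, 6.0], [1.0, 15.6]], dtype='float32'))
        # every element is clamped into [t_min, t_max]
        y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
        # y is [[ 1.,  6.], [ 1., 10.]]
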
diff --git a/doc/fluid/api_cn/layers_cn/case_cn.rst b/doc/fluid/api_cn/layers_cn/case_cn.rst
index cc9892af93819912eccac2cffd70574967d1ab7e..f1f76eae157728e7866eed1ef08e43c41dbe9a1f 100644
--- a/doc/fluid/api_cn/layers_cn/case_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/case_cn.rst
@@ -3,10 +3,16 @@
case
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.case(pred_fn_pairs, default=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+:alias_main: paddle.nn.case
+:alias: paddle.nn.case,paddle.nn.control_flow.case
+:old_api: paddle.fluid.layers.case
+
+
+
该OP的运行方式类似于python的if-elif-elif-else。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/cast_cn.rst b/doc/fluid/api_cn/layers_cn/cast_cn.rst
index 20f847adddfdec1f8ea6a9cf65f4e180d0451c70..8ce45cef9fe74a6016bf1d4f264696cde94d9e71 100644
--- a/doc/fluid/api_cn/layers_cn/cast_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cast_cn.rst
@@ -5,6 +5,12 @@ cast
.. py:function:: paddle.fluid.layers.cast(x,dtype)
+:alias_main: paddle.cast
+:alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast
+:old_api: paddle.fluid.layers.cast
+
+
+
该OP将 ``x`` 的数据类型转换为 ``dtype`` 并输出。支持输出和输入的数据类型相同。
参数:
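
A one-line sketch of the dtype conversion described above, in imperative mode (the input values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.array([1.2, 2.8], dtype='float32'))
        y = fluid.layers.cast(x, dtype='int64')  # y is [1, 2] with dtype int64
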
diff --git a/doc/fluid/api_cn/layers_cn/ceil_cn.rst b/doc/fluid/api_cn/layers_cn/ceil_cn.rst
index 76a6e67e66860caa506570ab7b375c7ab23bd7f1..2ee8e634f28e5b154ba365b5f91a519a0b8758f1 100644
--- a/doc/fluid/api_cn/layers_cn/ceil_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/ceil_cn.rst
@@ -5,6 +5,12 @@ ceil
.. py:function:: paddle.fluid.layers.ceil(x, name=None)
+:alias_main: paddle.ceil
+:alias: paddle.ceil,paddle.tensor.ceil,paddle.tensor.math.ceil
+:old_api: paddle.fluid.layers.ceil
+
+
+
向上取整运算函数。
.. math::
@@ -13,24 +19,21 @@ ceil
参数:
- - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32或float64。
- - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回: 输出为Tensor,与 ``x`` 维度相同、数据类型相同。
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
-返回类型: Variable
+返回类型: Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- import numpy as np
+ import paddle
+ paddle.disable_static()
- input_ceil = np.array([[-1.5,6],[1,15.6]])
- with fluid.dygraph.guard():
- x = fluid.dygraph.to_variable(input_ceil)
- y = fluid.layers.ceil(x)
- print(y.numpy())
- # [[-1. 6.]
- # [ 1. 16.]]
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.ceil(x)
+ print(out.numpy())
+ # [-0. -0. 1. 1.]
diff --git a/doc/fluid/api_cn/layers_cn/center_loss_cn.rst b/doc/fluid/api_cn/layers_cn/center_loss_cn.rst
index 3b4a349fe7436048bf5065338bfc37c003fae19e..82be3da0d52e96c7f9a3d97f1943691513ab4450 100644
--- a/doc/fluid/api_cn/layers_cn/center_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/center_loss_cn.rst
@@ -3,10 +3,16 @@
center_loss
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.center_loss(input, label, num_classes, alpha, param_attr, update_center=True)
+:api_attr: 声明式编程模式(静态图)
+:alias_main: paddle.nn.functional.center_loss
+:alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss
+:old_api: paddle.fluid.layers.center_loss
+
+
+
该OP接收一个来自于最后一个隐藏层的输出和目标标签作为输入,返回损失值。为每一个类别提供一个类别中心,计算mini-batch中每个样本与对应类别中心的距离的平均值作为center loss。
对于输入 :math:`X` 和标签 :math:`Y` ,计算公式为:
diff --git a/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst b/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst
index 7542ccc1fa553819deb542d387090d58d57118a9..c264a071ab03ca4a53279da3906db671e499b1d2 100644
--- a/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/chunk_eval_cn.rst
@@ -5,6 +5,9 @@ chunk_eval
.. py:function:: paddle.fluid.layers.chunk_eval(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None, sqe_length=None)
+
+
+
该OP计算语块识别(chunk detection)的准确率、召回率和F1值,常用于命名实体识别(NER,语块识别的一种)等序列标注任务中。
语块识别的基础请参考 `Chunking with Support Vector Machines `_
@@ -79,13 +82,13 @@ chunk_eval
dict_size = 10000
label_dict_len = 7
- sequence = fluid.layers.data(
- name='id', shape=[1], lod_level=1, dtype='int64')
- embedding = fluid.layers.embedding(
+ sequence = fluid.data(
+ name='id', shape=[None, 1], lod_level=1, dtype='int64')
+ embedding = fluid.embedding(
input=sequence, size=[dict_size, 512])
hidden = fluid.layers.fc(input=embedding, size=512)
- label = fluid.layers.data(
- name='label', shape=[1], lod_level=1, dtype='int32')
+ label = fluid.data(
+ name='label', shape=[None, 1], lod_level=1, dtype='int64')
crf = fluid.layers.linear_chain_crf(
input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(
@@ -94,7 +97,7 @@ chunk_eval
input=crf_decode,
label=label,
chunk_scheme="IOB",
- num_chunk_types=(label_dict_len - 1) / 2)
+ num_chunk_types=int((label_dict_len - 1) / 2))
diff --git a/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst b/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst
index bec5d23dfa4393dc6029cd77203f0fdefdda771d..af3467b58d02d54d02dcac5a792c03f3a5db8405 100644
--- a/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/clip_by_norm_cn.rst
@@ -5,6 +5,12 @@ clip_by_norm
.. py:function:: paddle.fluid.layers.clip_by_norm(x, max_norm, name=None)
+:alias_main: paddle.nn.clip_by_norm
+:alias: paddle.nn.clip_by_norm,paddle.nn.clip.clip_by_norm
+:old_api: paddle.fluid.layers.clip_by_norm
+
+
+
ClipByNorm算子
此算子将输入 ``X`` 的L2范数限制在 ``max_norm`` 内。如果 ``X`` 的L2范数小于或等于 ``max_norm`` ,则输出(Out)将与 ``X`` 相同。如果X的L2范数大于 ``max_norm`` ,则 ``X`` 将被线性缩放,使得输出(Out)的L2范数等于 ``max_norm`` ,如下面的公式所示:
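
A minimal sketch of limiting a tensor's L2 norm as described above (the shape and max_norm are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    input = fluid.data(name='data', shape=[None, 1], dtype='float32')
    # rescale input so that its L2 norm is at most 1.0
    reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
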
diff --git a/doc/fluid/api_cn/layers_cn/clip_cn.rst b/doc/fluid/api_cn/layers_cn/clip_cn.rst
index b100e6f1f7d2fce8cdec508ffc31087bd65c7425..071fe323ffdf8de41a454669e3b23e2f7f976a4b 100644
--- a/doc/fluid/api_cn/layers_cn/clip_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/clip_cn.rst
@@ -5,6 +5,12 @@ clip
.. py:function:: paddle.fluid.layers.clip(x, min, max, name=None)
+:alias_main: paddle.nn.clip
+:alias: paddle.nn.clip,paddle.nn.clip.clip
+:old_api: paddle.fluid.layers.clip
+
+
+
该OP对输入Tensor每个元素的数值进行裁剪,使得输出Tensor元素的数值被限制在区间[min, max]内。具体的计算公式为如下。
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst
index c895d2ddaea97525818071f9920ba2e87daad709..9c00888befa5a82ec0296f9f51496bea1697f273 100644
--- a/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/collect_fpn_proposals_cn.rst
@@ -5,6 +5,12 @@ collect_fpn_proposals
.. py:function:: paddle.fluid.layers.collect_fpn_proposals(multi_rois, multi_scores, min_level, max_level, post_nms_top_n, name=None)
+:alias_main: paddle.nn.functional.collect_fpn_proposals
+:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
+:old_api: paddle.fluid.layers.collect_fpn_proposals
+
+
+
**该op仅支持LoDTensor输入**。连接多级RoIs(感兴趣区域)并依据multi_scores选择N个RoIs。此操作执行以下步骤:
1、选择num_level个RoIs和scores作为输入:num_level = max_level - min_level
2、连接num_level个RoIs和scores。
diff --git a/doc/fluid/api_cn/layers_cn/concat_cn.rst b/doc/fluid/api_cn/layers_cn/concat_cn.rst
index b35d3f125496b432cf1d7c485d61a3116a32a1a5..a0c2ade9178f1842e355cfd3bdb0e667db38cd2d 100644
--- a/doc/fluid/api_cn/layers_cn/concat_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/concat_cn.rst
@@ -3,18 +3,17 @@
concat
-------------------------------
-.. py:function:: paddle.fluid.layers.concat(input,axis=0,name=None)
+.. py:function:: paddle.fluid.layers.concat(input, axis=0, name=None)
-该OP对输入沿 ``axis`` 轴进行联结。
+
+该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。
参数:
- - **input** (list) - 输入是待联结的多维 ``Tensor`` 组成的 ``list`` ,支持的数据类型为:float32、float64、int32、int64。
- - **axis** (int|Variable,可选) - 整数或者形状为[1]的 ``Tensor``,数据类型为 ``int32``。指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中 ``Tensor`` 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。
+ - **input** (list|tuple|Tensor) - 待联结的Tensor list,Tensor tuple或者Tensor,支持的数据类型为:bool、float16、 float32、float64、int32、int64。 ``input`` 中所有Tensor的数据类型必须一致。
+ - **axis** (int|Tensor,可选) - 指定对输入Tensor进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中Tensor 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。
- **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
-返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 相同。
-
-返回类型:Variable
+返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 中的Tensor相同。
**代码示例**:
@@ -23,18 +22,18 @@ concat
import paddle.fluid as fluid
import numpy as np
- in1 = np.array([[1,2,3],
- [4,5,6]])
- in2 = np.array([[11,12,13],
- [14,15,16]])
- in3 = np.array([[21,22],
- [23,24]])
+ in1 = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ in2 = np.array([[11, 12, 13],
+ [14, 15, 16]])
+ in3 = np.array([[21, 22],
+ [23, 24]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
x2 = fluid.dygraph.to_variable(in2)
x3 = fluid.dygraph.to_variable(in3)
- out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
- out2 = fluid.layers.concat(input=[x1,x2], axis=0)
+ out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
+ out2 = fluid.layers.concat(input=[x1, x2], axis=0)
print(out1.numpy())
# [[ 1 2 3 11 12 13 21 22]
# [ 4 5 6 14 15 16 23 24]]
diff --git a/doc/fluid/api_cn/layers_cn/cond_cn.rst b/doc/fluid/api_cn/layers_cn/cond_cn.rst
index bdca5f2025f9da79678bdf8586d8f59b4a3e59b0..a72495a7392e8a17695a1eee96e286a7bcfba3dc 100644
--- a/doc/fluid/api_cn/layers_cn/cond_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cond_cn.rst
@@ -3,10 +3,16 @@
cond
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.cond(pred, true_fn=None, false_fn=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+:alias_main: paddle.nn.cond
+:alias: paddle.nn.cond,paddle.nn.control_flow.cond
+:old_api: paddle.fluid.layers.cond
+
+
+
如果 ``pred`` 是 ``True`` ,该API返回 ``true_fn()`` ,否则返回 ``false_fn()`` 。
用户如果不想在 ``callable`` 中做任何事,可以把 ``true_fn`` 或 ``false_fn`` 设为 ``None`` ,此时本API会把该 ``callable`` 视为简单返回 ``None`` 。
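
A minimal sketch of the true_fn/false_fn dispatch described above (the constants and the less_than predicate are assumptions for the example):

.. code-block:: python

    import paddle.fluid as fluid

    def true_func():
        return fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)

    def false_func():
        return fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)

    x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
    y = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.23)
    pred = fluid.layers.less_than(x, y)                    # pred is True here
    out = fluid.layers.cond(pred, true_func, false_func)   # so out evaluates to [1]
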
diff --git a/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst b/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst
index ab90721deaba47438ae18ad2e0598df54cf150be..bdc610b1739df00f4d04f96ee501cfff7e107a94 100644
--- a/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/continuous_value_model_cn.rst
@@ -5,6 +5,12 @@ continuous_value_model
.. py:function:: paddle.fluid.layers.continuous_value_model(input, cvm, use_cvm=True)
+:alias_main: paddle.nn.functional.continuous_value_model
+:alias: paddle.nn.functional.continuous_value_model,paddle.nn.functional.extension.continuous_value_model
+:old_api: paddle.fluid.layers.continuous_value_model
+
+
+
**注意:该OP仅支持在CPU运行。**
该OP在CTR项目中,用于去除或处理 ``input`` 中的展示和点击值。
@@ -31,7 +37,8 @@ continuous_value_model
input=input,
size=[100, 11],
dtype='float32')
- ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
+ label_shape = fluid.layers.shape(label)
+ ones = fluid.layers.fill_constant(shape=[label_shape[0], 1], dtype="int64", value=1)
show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
diff --git a/doc/fluid/api_cn/layers_cn/conv2d_cn.rst b/doc/fluid/api_cn/layers_cn/conv2d_cn.rst
index fb5bcd097e69e16ea166cc7c131641835473c09f..4ae868dfea111869eaa664cc4ca7a763c86651c3 100644
--- a/doc/fluid/api_cn/layers_cn/conv2d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/conv2d_cn.rst
@@ -3,10 +3,13 @@
conv2d
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.conv2d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW")
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。
对每个输入X,有等式:
diff --git a/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst b/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst
index cefc7b688eb7943c127b87f0ccc5efd0c07f9b40..3223f07188db98723157b6d8a53336dd9291653a 100644
--- a/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/conv2d_transpose_cn.rst
@@ -3,10 +3,13 @@
conv2d_transpose
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCHW')
+:api_attr: 声明式编程模式(静态图)
+
+
+
二维转置卷积层(Convolution2D transpose layer)
该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。
diff --git a/doc/fluid/api_cn/layers_cn/conv3d_cn.rst b/doc/fluid/api_cn/layers_cn/conv3d_cn.rst
index 0462892a9c7f3e025f2a18aa1895074faa7bacfa..7468274e7a27a5ce3984d4fb1e0d13daa2237b05 100644
--- a/doc/fluid/api_cn/layers_cn/conv3d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/conv3d_cn.rst
@@ -3,10 +3,13 @@
conv3d
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.conv3d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCDHW")
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDHWC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convolution3D)和二维卷积(Convolution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。
对每个输入X,有等式:
diff --git a/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst b/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst
index 9832db8a8618dba95d2e90a1616b7b8e9b2e820b..0331df5d6a4fab39a78c68190b0b1066ec5681a9 100644
--- a/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/conv3d_transpose_cn.rst
@@ -3,10 +3,13 @@
conv3d_transpose
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.conv3d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCDHW')
+:api_attr: 声明式编程模式(静态图)
+
+
+
三维转置卷积层(Convolution3D transpose layer)
该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。
diff --git a/doc/fluid/api_cn/layers_cn/cos_cn.rst b/doc/fluid/api_cn/layers_cn/cos_cn.rst
index 9d0f89e5fff1242363ce8d11f9762638e32070dd..7d0727576e0351584d8d2bd5fc7e0fa2f9f32546 100644
--- a/doc/fluid/api_cn/layers_cn/cos_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cos_cn.rst
@@ -5,34 +5,36 @@ cos
.. py:function:: paddle.fluid.layers.cos(x, name=None)
+:alias_main: paddle.cos
+:alias: paddle.cos,paddle.tensor.cos,paddle.tensor.math.cos
+:old_api: paddle.fluid.layers.cos
+
+
+
余弦函数。
+输入范围是 `(-inf, inf)` , 输出范围是 `[-1,1]`。
+
.. math::
out = cos(x)
-
-
参数:
- - **x** (Variable) - 该OP的输入为多维Tensor,数据类型为float32,float64。
- - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。
-
+ - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。
+ - name (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。
-返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。
+返回:输出Tensor,与 ``x`` 维度相同、数据类型相同。
-返回类型:Variable
+返回类型:Tensor
**代码示例**:
.. code-block:: python
- import paddle.fluid as fluid
- import numpy as np
+ import paddle
+ paddle.disable_static()
- input_cos = np.array([[-1,np.pi],[1,15.6]])
- with fluid.dygraph.guard():
- x = fluid.dygraph.to_variable(input_cos)
- y = fluid.layers.cos(x)
- print(y.numpy())
- # [[ 0.54030231 -1. ]
- # [ 0.54030231 -0.99417763]]
+ x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+ out = paddle.cos(x)
+ print(out.numpy())
+ # [0.92106099 0.98006658 0.99500417 0.95533649]
diff --git a/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst b/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst
index b6fb77dfc50d1ddcfe8c0251f55a8f0fdb26c9b5..bee83df05f929eb2808de57958d66198e8531d05 100644
--- a/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cos_sim_cn.rst
@@ -5,6 +5,9 @@ cos_sim
.. py:function:: paddle.fluid.layers.cos_sim(X, Y)
+
+
+
余弦相似度算子(Cosine Similarity Operator)
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst b/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst
index b45682cdec8fd7774254b0e36892d321fa8fd94e..9117c06dd61c6bef978cde326fd64075fdeb7657 100644
--- a/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cosine_decay_cn.rst
@@ -5,6 +5,12 @@ cosine_decay
.. py:function:: paddle.fluid.layers.cosine_decay(learning_rate, step_each_epoch, epochs)
+:alias_main: paddle.nn.functional.cosine_decay
+:alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
+:old_api: paddle.fluid.layers.cosine_decay
+
+
+
使用 cosine decay 的衰减方式进行学习率调整。
在训练模型时,建议一边进行训练一边降低学习率。 通过使用此方法,学习速率将通过如下cosine衰减策略进行衰减:
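
A minimal sketch of plugging the cosine-decayed learning rate into an optimizer (the base rate, steps and epochs are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    base_lr = 0.1
    lr = fluid.layers.cosine_decay(
        learning_rate=base_lr, step_each_epoch=10000, epochs=120)
    optimizer = fluid.optimizer.SGD(learning_rate=lr)
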
diff --git a/doc/fluid/api_cn/layers_cn/create_array_cn.rst b/doc/fluid/api_cn/layers_cn/create_array_cn.rst
index 805910b24d947e58e6fa292871bc6018c6e8fc21..1426688b11bafd00a8a115aaf3f3eefcd907979b 100644
--- a/doc/fluid/api_cn/layers_cn/create_array_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/create_array_cn.rst
@@ -6,6 +6,9 @@ create_array
.. py:function:: paddle.fluid.layers.create_array(dtype)
+
+
+
此OP创建一个LoDTensorArray,它可以用作 :ref:`cn_api_fluid_layers_array\_write` , :ref:`cn_api_fluid_layers_array\_read` OP的输入,以及和 :ref:`cn_api_fluid_layers_While` OP
一起创建RNN网络。
diff --git a/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst b/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst
index 89d53c61fd91cc22e63cf5267fcb088bafd57414..97034b6dc1c6bc60e8be576cb2332deb1f1d77f7 100644
--- a/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/create_global_var_cn.rst
@@ -5,6 +5,9 @@ create_global_var
.. py:function:: paddle.fluid.layers.create_global_var(shape,value,dtype,persistable=False,force_cpu=False,name=None)
+
+
+
该OP在全局块中创建一个新的Tensor,Tensor的值为 ``value`` 。
参数:
@@ -26,7 +29,7 @@ create_global_var
import paddle.fluid as fluid
import paddle.fluid.layers as layers
var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
- persistable=True, force_cpu=True, name='new_var')
+ persistable=True, force_cpu=True, name='new_var')
diff --git a/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst b/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst
index 224d2bb577b685294f64a2b800a9da5298ac38c4..4176d7507f80e9767b32755f1169fbcbd502bb6a 100644
--- a/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/create_parameter_cn.rst
@@ -3,10 +3,13 @@
create_parameter
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.create_parameter(shape,dtype,name=None,attr=None,is_bias=False,default_initializer=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP创建一个参数。该参数是一个可学习的变量, 拥有梯度并且可优化。
**注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。**
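
A brief sketch of creating a learnable weight and using it directly (the shapes and the matmul usage are assumptions for the example):

.. code-block:: python

    import paddle.fluid as fluid

    # a learnable 784 x 200 weight matrix
    W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
    # it behaves like any other variable, e.g. in a matmul
    x = fluid.data(name='x', shape=[None, 784], dtype='float32')
    y = fluid.layers.matmul(x=x, y=W)
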
diff --git a/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst b/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst
index 99eecff605fce6644dbbe5d03d2b69ac8696c2cc..ed42dceaa52ef3be13ffc4d2d50d7458fb317584 100644
--- a/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/create_py_reader_by_data_cn.rst
@@ -3,10 +3,13 @@
create_py_reader_by_data
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.create_py_reader_by_data(capacity,feed_list,name=None,use_double_buffer=True)
+:api_attr: 声明式编程模式(静态图)
+
+
+
创建一个Python端提供数据的reader。该OP与 :ref:`cn_api_fluid_layers_py_reader` 类似,不同点在于它能够从feed变量列表读取数据。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst b/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst
index 651a5c4901cd8664150a94cc326c7ca64ed804d4..d00d67ebbe70f10d06e8d4bd1d96cfcb6ff7145a 100644
--- a/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/create_tensor_cn.rst
@@ -5,6 +5,12 @@ create_tensor
.. py:function:: paddle.fluid.layers.create_tensor(dtype,name=None,persistable=False)
+:alias_main: paddle.create_tensor
+:alias: paddle.create_tensor,paddle.tensor.create_tensor,paddle.tensor.creation.create_tensor
+:old_api: paddle.fluid.layers.create_tensor
+
+
+
创建数据类型为dtype的Tensor。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst b/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst
index 8e9eb802f91d110096e819383e46d39b25e690c7..ea5fd7eb1e56351474a5a72181b8e58e1929811d 100644
--- a/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/crf_decoding_cn.rst
@@ -3,10 +3,13 @@
crf_decoding
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.crf_decoding(input, param_attr, label=None, length=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该层读取由 :ref:`cn_api_fluid_layers_linear_chain_crf` 学习的 emission feature weights(发射状态特征的权重)和 transition feature weights (转移特征的权重) 进行解码。
diff --git a/doc/fluid/api_cn/layers_cn/crop_cn.rst b/doc/fluid/api_cn/layers_cn/crop_cn.rst
index d4ccdfaf8827cff8542ac4d28590d099a1617869..18d375f06d5e4277e93995db4de71a3006cbf0fb 100644
--- a/doc/fluid/api_cn/layers_cn/crop_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/crop_cn.rst
@@ -5,6 +5,9 @@ crop
.. py:function:: paddle.fluid.layers.crop(x, shape=None, offsets=None, name=None)
+
+
+
该OP根据偏移量(offsets)和形状(shape),裁剪输入张量。
**注意:** 此OP已被弃用,它将在以后的版本中被删除,请使用 :ref:`cn_api_fluid_layers_crop_tensor` 替代
diff --git a/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst b/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst
index dcaa6758f9b43bdab2b2aca503306deb8ae0bfb9..79b2de8fcc0259615cb1f5eadc6ee2da4b71359d 100644
--- a/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/crop_tensor_cn.rst
@@ -5,6 +5,12 @@ crop_tensor
.. py:function:: paddle.fluid.layers.crop_tensor(x, shape=None, offsets=None, name=None)
+:alias_main: paddle.crop_tensor
+:alias: paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor
+:old_api: paddle.fluid.layers.crop_tensor
+
+
+
根据偏移量(offsets)和形状(shape),裁剪输入(x)Tensor。
**示例**:
@@ -98,7 +104,7 @@ crop_tensor
# crop3.shape = [-1, 2, 3]
# offsets is a list in which each element is a constant or Tensor
- offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32")
+ offsets_var = fluid.data(name="offset", shape=[1], dtype="int32")
crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var])
# crop4.shape = [-1, 2, 3]
diff --git a/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst b/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst
index 5babd50838f17d9e99ab649e9dcebade6f17c132..be571e656a7ee8cdfbd68af0038fa31a0a7fafc5 100644
--- a/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cross_entropy_cn.rst
@@ -5,6 +5,12 @@ cross_entropy
.. py:function:: paddle.fluid.layers.cross_entropy(input, label, soft_label=False, ignore_index=-100)
+:alias_main: paddle.nn.functional.cross_entropy
+:alias: paddle.nn.functional.cross_entropy,paddle.nn.functional.loss.cross_entropy
+:old_api: paddle.fluid.layers.cross_entropy
+
+
+
该OP计算输入input和标签label间的交叉熵,可用于计算硬标签或软标签的交叉熵。
1. 硬标签交叉熵算法:若soft_label = False, :math:`label[i_1, i_2, ..., i_k]` 表示每个样本的硬标签值:
diff --git a/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst b/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst
index 7df10ca633746d75d105b2c9692b083d200018be..c2d8f67ca85f49c121c2a6530b4ad7bcb1632328 100644
--- a/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/ctc_greedy_decoder_cn.rst
@@ -5,17 +5,21 @@ ctc_greedy_decoder
.. py:function:: paddle.fluid.layers.ctc_greedy_decoder(input, blank, name=None)
-**注意:该OP的输入input必须是2维LoDTensor, lod_level为1**
+
+
+
该OP用于贪婪策略解码序列,步骤如下:
1. 获取输入中的每一行的最大值索引,也就是numpy.argmax(input, axis=0)。
2. 对于step1结果中的每个序列,合并两个空格之间的重复部分并删除所有空格。
+该API支持两种输入,LoDTensor和Tensor输入,不同输入的代码样例如下:
**样例**:
::
+ # for lod tensor input
已知:
input.data = [[0.6, 0.1, 0.3, 0.1],
@@ -45,13 +49,38 @@ ctc_greedy_decoder
output.lod = [[2, 1]]
+ # for tensor input
+ input.data = [[[0.6, 0.1, 0.3, 0.1],
+ [0.3, 0.2, 0.4, 0.1],
+ [0.1, 0.5, 0.1, 0.3],
+ [0.5, 0.1, 0.3, 0.1]],
+
+ [[0.5, 0.1, 0.3, 0.1],
+ [0.2, 0.2, 0.2, 0.4],
+ [0.2, 0.2, 0.1, 0.5],
+ [0.5, 0.1, 0.3, 0.1]]]
+
+ input_length.data = [[4], [4]]
+ input.shape = [2, 4, 4]
+
+ step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
+ [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
+ step2: Change the argmax result to use padding mode, then argmax result is
+ [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
+ step3: Apply ctc_align to padding argmax result, padding_value is 0
+
+ Finally:
+ output.data = [[2, 1, 0, 0],
+ [3, 0, 0, 0]]
+ output_length.data = [[2], [1]]
+
参数:
- - **input** (Variable) — 变长序列的概率,2维LoDTensor, lod_level为1。它的形状是[Lp, num_classes + 1],其中Lp是所有输入序列长度的和,num_classes是类别数目(不包括空白标签)。数据类型是float32或者float64
+ - **input** (Variable) — 变长序列的概率。在输入为LoDTensor的情况下,它是具有LoD信息的二维LoDTensor,形状为[Lp,num_classes + 1],其中Lp是所有输入序列的长度之和,num_classes是真实的类数(不包括空白标签)。在输入为Tensor的情况下,它是带有填充的3-D张量,形状为[batch_size,N,num_classes + 1]。数据类型可以是float32或float64。
- **blank** (int) — Connectionist Temporal Classification (CTC) loss空白标签索引, 其数值属于半开区间[0,num_classes + 1)
- **name** (str) — (str|None,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None
-返回: CTC贪婪解码结果是一个形为(Lp,1)的2维LoDTensor,lod_level为1,其中Lp是所有输出序列的长度之和。如果结果中的所有序列都为空,则输出LoDTensor为[-1],其lod信息为空。
+返回:对于输入为LoDTensor的情况,返回CTC贪婪解码器的结果,即2-D LoDTensor,形状为[Lp,1],数据类型为int64,其中Lp是所有输出序列长度的总和;如果结果中的所有序列均为空,则结果LoDTensor将为[-1],其LoD为[[]]。对于输入为Tensor的情况,返回一个元组 (output, output_length),其中output是一个形状为[batch_size, N]、类型为int64的Tensor,output_length是一个形状为[batch_size, 1]、类型为int64的Tensor,表示Tensor输入下每个输出序列的长度。
返回类型: Variable
@@ -60,9 +89,15 @@ ctc_greedy_decoder
.. code-block:: python
+ # for lod mode
import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[8], dtype='float32')
+ x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
+ # for padding mode
+ x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
+ x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
+ out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
+ input_length=x_pad_len)
diff --git a/doc/fluid/api_cn/layers_cn/cumsum_cn.rst b/doc/fluid/api_cn/layers_cn/cumsum_cn.rst
index 623cf6bc359f1b7ba1127f157c5d1e66627b0b3f..8e6f238b87381651e08b0a0dac4fa441b7605683 100644
--- a/doc/fluid/api_cn/layers_cn/cumsum_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/cumsum_cn.rst
@@ -5,6 +5,7 @@ cumsum
.. py:function:: paddle.fluid.layers.cumsum(x,axis=None,exclusive=None,reverse=None)
+
沿给定轴(axis)的元素的累加和。默认结果的第一个元素和输入的第一个元素一致。如果exclusive为True,结果的第一个元素则为0。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/data_cn.rst b/doc/fluid/api_cn/layers_cn/data_cn.rst
index 298dcf6a85cbec50936ae3c6962157dc3b66e87e..d73c7d55181d91c4115fca7ec8c8f8edcb7a35a5 100644
--- a/doc/fluid/api_cn/layers_cn/data_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/data_cn.rst
@@ -3,10 +3,12 @@
data
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=VarType.LOD_TENSOR, stop_gradient=True)
+
+
+
该OP会在全局block中创建变量(Variable),该全局变量可被计算图中的算子(operator)访问。
注意:
diff --git a/doc/fluid/api_cn/layers_cn/data_norm_cn.rst b/doc/fluid/api_cn/layers_cn/data_norm_cn.rst
index c98363e67f1e58ed7b232ad44a13ee1e12ad1533..4954dfa22a7c67baa441c7a209ab7df87d8cc4d8 100644
--- a/doc/fluid/api_cn/layers_cn/data_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/data_norm_cn.rst
@@ -3,10 +3,13 @@
data_norm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.data_norm(input, act=None, epsilon=1e-05, param_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**数据正则化层**
可用作conv2d和fully_connected操作的正则化函数。 此层所需的数据格式为以下之一:
@@ -39,6 +42,7 @@ data_norm
- **slot_dim** (int, 默认值为-1) - 一个slot的embedding维度,slot用来表征一类特征的集合,在pslib模式下,通常我们通过slot区分特征id,并从参数服务器(pslib)中提取它们的embedding。embedding的第一维是历史上这个embedding展示的次数。如果本op的输入是由这样的embedding连接而来,那么当这个特征id是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了slot_dim来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。
- **summary_decay_rate** (float, 默认值为0.9999999) - 更新summary信息时的衰减率。
- **sync_stats** (bool, 默认值False) - 在多GPU卡的场景下可以使用,用来同步多卡间的summary信息。
+ - **enable_scale_and_shift** (bool, 默认值False) - 在分布式全局正则化后,是否像batchnorm一样做scale&shift操作。
返回: 张量变量,是对输入数据进行正则化后的结果。
diff --git a/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst b/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst
index 55bcb172e759147c4bddbd6b78885652bb538a1a..a74315d5ad7fbcc54bef8869c7ebd11433450b2d 100644
--- a/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/deformable_conv_cn.rst
@@ -3,10 +3,13 @@
deformable_conv
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**可变形卷积算子**
deformable_conv op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示:
diff --git a/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst b/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst
index bf9d96c1545afb29e0caa9ab6ad5151560693a82..bcccb58ca3fd10fd79903184f59362b936e63804 100644
--- a/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/deformable_roi_pooling_cn.rst
@@ -5,6 +5,12 @@ deformable_roi_pooling
.. py:function:: paddle.fluid.layers.deformable_roi_pooling(input, rois, trans, no_trans=False, spatial_scale=1.0, group_size=[1, 1], pooled_height=1, pooled_width=1, part_size=None, sample_per_part=1, trans_std=0.1, position_sensitive=False, name=None)
+:alias_main: paddle.nn.functional.deformable_roi_pooling
+:alias: paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling
+:old_api: paddle.fluid.layers.deformable_roi_pooling
+
+
+
可变形感兴趣区域(ROI)池化层
该OP对输入进行了可形变的感兴趣区域(ROI)池化操作。如同 `可形变卷积网络 `_ 描述的一样,它将为每个bin中的像素获取一个偏移量,以便于在合适的位置进行池化。在完成可变形感兴趣区域(ROI)池化操作之后,批量数将变为候选框的数量。
@@ -42,7 +48,7 @@ deformable_roi_pooling
.. code-block:: python
- #position_sensitive为False
+ #position_sensitive=False
import paddle.fluid as fluid
input = fluid.data(name="input",
@@ -68,7 +74,7 @@ deformable_roi_pooling
trans_std=0.1,
position_sensitive=False)
- #position_sensitive为True
+ #position_sensitive=True
import paddle.fluid as fluid
input = fluid.data(name="input",
diff --git a/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst b/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst
index 0b122b32ea4db9298f6d74218ff1091cd3b8ced4..c88bff497978ffa81b31b332df065ff2e265c77a 100644
--- a/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/density_prior_box_cn.rst
@@ -5,6 +5,12 @@ density_prior_box
.. py:function:: paddle.fluid.layers.density_prior_box(input, image, densities=None, fixed_sizes=None, fixed_ratios=None, variance=[0.1, 0.1, 0.2, 0.2], clip=False, steps=[0.0, 0.0], offset=0.5, flatten_to_2d=False, name=None)
+:alias_main: paddle.nn.functional.density_prior_box
+:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
+:old_api: paddle.fluid.layers.density_prior_box
+
+
+
该OP为SSD算法(Single Shot MultiBox Detector)生成density prior box,在每个 ``input`` 的位置产生N个候选框,其中,N由 ``densities`` , ``fixed_sizes`` 和 ``fixed_ratios`` 来计算。生成的每个输入位置附近的候选框中心(网格点)由 ``densities`` 和 ``density prior box`` 的数量计算,其中 ``density prior box`` 的数量由 ``fixed_sizes`` 和 ``fixed_ratios`` 决定。``fixed_sizes`` 和 ``densities`` 的大小一致。
diff --git a/doc/fluid/api_cn/layers_cn/detection_output_cn.rst b/doc/fluid/api_cn/layers_cn/detection_output_cn.rst
index b2c639da9c2c03d1113504430a6d8f41a45122cd..9d39e7881dc9b191dcec464b3a190404aef933d1 100644
--- a/doc/fluid/api_cn/layers_cn/detection_output_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/detection_output_cn.rst
@@ -5,6 +5,12 @@ detection_output
.. py:function:: paddle.fluid.layers.detection_output(loc, scores, prior_box, prior_box_var, background_label=0, nms_threshold=0.3, nms_top_k=400, keep_top_k=200, score_threshold=0.01, nms_eta=1.0)
+:alias_main: paddle.nn.functional.detection_output
+:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
+:old_api: paddle.fluid.layers.detection_output
+
+
+
给定回归位置偏移、置信度以及先验框信息计算检测的输出,执行步骤如下:
1.根据先验框(``prior_box``)信息和回归位置偏移解码出预测框坐标。
diff --git a/doc/fluid/api_cn/layers_cn/diag_cn.rst b/doc/fluid/api_cn/layers_cn/diag_cn.rst
index 692b64dc5d30e4ecfaf43e8120ccb1972f2971f4..f08ae61e0e44c620ee20cadc979c91450aec9010 100644
--- a/doc/fluid/api_cn/layers_cn/diag_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/diag_cn.rst
@@ -5,6 +5,12 @@ diag
.. py:function:: paddle.fluid.layers.diag(diagonal)
+:alias_main: paddle.diag
+:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
+:old_api: paddle.fluid.layers.diag
+
+
+
该OP创建一个方阵,使用输入diagonal来指定方阵的对角线元素的值。
参数:
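
A minimal sketch of building a square matrix from a given diagonal (the values are illustrative):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # builds
    # [[3, 0, 0],
    #  [0, 4, 0],
    #  [0, 0, 5]]
    data = fluid.layers.diag(np.arange(3, 6, dtype='int32'))
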
diff --git a/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst b/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst
index 52e0ce76b376bc7c215415aec549bc2a8d8c19ad..af63877e359bf830d35985556e6e94e215adba75 100644
--- a/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dice_loss_cn.rst
@@ -5,6 +5,12 @@ dice_loss
.. py:function:: paddle.fluid.layers.dice_loss(input, label, epsilon=1e-05)
+:alias_main: paddle.nn.functional.dice_loss
+:alias: paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss
+:old_api: paddle.fluid.layers.dice_loss
+
+
+
该OP用来比较预测结果跟标签之间的相似度,通常用于二值图像分割,即标签为二值,也可以做多标签的分割。
dice_loss定义为:
diff --git a/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst
index b12dc2cfba777b7552ac6c83cac8fcb103263783..33cbb83a59486ec75eef9962b0af3035f61e3415 100644
--- a/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/distribute_fpn_proposals_cn.rst
@@ -5,6 +5,12 @@ distribute_fpn_proposals
.. py:function:: paddle.fluid.layers.distribute_fpn_proposals(fpn_rois, min_level, max_level, refer_level, refer_scale, name=None)
+:alias_main: paddle.nn.functional.distribute_fpn_proposals
+:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
+:old_api: paddle.fluid.layers.distribute_fpn_proposals
+
+
+
**该op仅支持LoDTensor输入**。在 Feature Pyramid Networks(FPN)模型中,需要依据proposal的尺度和参考尺度与级别将所有proposal分配到不同的FPN级别中。 此外,为了恢复proposals的顺序,我们返回一个数组,该数组表示当前proposals中的原始RoIs索引。 要计算每个RoI的FPN级别,公式如下:
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst b/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst
index f5a6ed9f70ee895f42259d1204089a4fa4c9446a..f9fe3b110ab2db024599bf4be1687a15f4c1006c 100644
--- a/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/double_buffer_cn.rst
@@ -6,6 +6,9 @@ double_buffer
.. py:function:: paddle.fluid.layers.double_buffer(reader, place=None, name=None)
+
+
+
生成一个双缓冲队列Reader。Reader类有DecoratedReader和FileReader,其中DecoratedReader又可以细分成CustomReader和BufferedReader。这里是基于BufferedReader,数据将复制到具有双缓冲队列的位置(由place指定),如果 ``place=None`` 则将使用executor执行的位置。
参数:
@@ -24,13 +27,13 @@ double_buffer
.. code-block:: python
- import paddle.fluid as fluid
- reader = fluid.layers.open_files(filenames=['mnist.recordio'],
- shapes=[[-1, 784], [-1, 1]],
- lod_levels=[0, 0],
- dtypes=['float32', 'int64'])
- reader = fluid.layers.double_buffer(reader)
- img, label = fluid.layers.read_file(reader)
+ import paddle.fluid as fluid
+ reader = fluid.layers.py_reader(capacity=64,
+ shapes=[(-1, 1, 28, 28), (-1, 1)],
+ dtypes=['float32', 'int64'],
+ use_double_buffer=False)
+ reader = fluid.layers.double_buffer(reader)
+ image, label = fluid.layers.read_file(reader)
diff --git a/doc/fluid/api_cn/layers_cn/dropout_cn.rst b/doc/fluid/api_cn/layers_cn/dropout_cn.rst
index ec3149aa993374469a5fc597ab3eef962fd3e349..8c748ec91af6395405bb46a43b3e94b59ebfa153 100644
--- a/doc/fluid/api_cn/layers_cn/dropout_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dropout_cn.rst
@@ -5,6 +5,12 @@ dropout
.. py:function:: paddle.fluid.layers.dropout(x,dropout_prob,is_test=False,seed=None,name=None,dropout_implementation='downgrade_in_infer')
+:alias_main: paddle.nn.functional.dropout
+:alias: paddle.nn.functional.dropout,paddle.nn.functional.common.dropout
+:old_api: paddle.fluid.layers.dropout
+
+
+
dropout操作
丢弃或者保持x的每个元素独立。Dropout是一种正则化手段,通过在训练过程中阻止神经元节点间的相关性来减少过拟合。根据给定的丢弃概率,dropout操作符按丢弃概率随机将一些神经元输出设置为0,其他的仍保持不变。
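
A minimal static-graph sketch of the dropout behaviour described above (the shape and probability are illustrative):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 32, 32], dtype='float32')
    # during training each element is zeroed with probability 0.5
    dropped = fluid.layers.dropout(x, dropout_prob=0.5)
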
diff --git a/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst
index d21497a511bd9b74edac6be9b06ca51b00841642..ea289057a13d6d8d572c15350a6d87d3d072f03d 100644
--- a/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dynamic_decode_cn.rst
@@ -4,10 +4,13 @@ dynamic_decode
-------------------------------
-**注意:该API仅支持【静态图】模式**
-.. py:method:: dynamic_decode(decoder, inits=None, max_step_num=None, output_time_major=False, **kwargs):
-
+.. py:method:: dynamic_decode(decoder, inits=None, max_step_num=None, output_time_major=False, impute_finished=False, is_test=False, return_length=False, **kwargs):
+
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口重复执行 :code:`decoder.step()` 直到 其返回的表示完成状态的Tensor中的值全部为True或解码步骤达到 :code:`max_step_num`。
:code:`decode.initialize()` 会在解码循环之前被调用一次。如果 :code:`decoder` 实现了 :code:`finalize` 方法,则 :code:`decoder.finalize()` 在解码循环后将被调用一次。
@@ -17,9 +20,12 @@ dynamic_decode
- **inits** (object,可选) - 传递给 :code:`decoder.initialize` 的参数。默认为None。
- **max_step_num** (int,可选) - 最大步数。如果未提供,解码直到解码过程完成( :code:`decode.step()` 返回的表示完成状态的Tensor中的值全部为True)。默认为None。
- **output_time_major** (bool,可选) - 指明最终输出(此方法的第一个返回值)中包含的Tensor的数据布局。如果为False,其将使用batch优先的数据布局, 此时的形状为 :math:`[batch\_size,seq\_len,...]`。如果为True,其将使用time优先的数据布局,此时的形状为 :math:`[seq\_len,batch\_size,...]`。默认值为False。
+ - **impute_finished** (bool,可选) - 若为True,对于当前批次中完成状态为结束的样本,将会拷贝其上一步的状态,而非像未结束的实例那样使用 :code:`decode.step()` 返回的 :code:`next_states` 作为新的状态,这保证了返回的最终状态 :code:`final_states` 是正确的;否则,不会区分是否结束,也没有这个拷贝操作。若 :code:`final_states` 会被使用,则这里应该设置为True,这会一定程度上影响速度。默认为False。
+ - **is_test** (bool,可选) - 标识是否是预测模式,预测模式下内存占用会更少。默认为False。
+ - **return_length** (bool,可选) - 标识是否在返回的元组中额外包含一个存放了所有解码序列实际长度的Tensor。默认为False。
- **kwargs** - 其他命名关键字参数。这些参数将传递给 :code:`decoder.step`。
-返回:一个二元组 :code:`(final_outputs,final_states)`, 其包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.output_dtype` 相同的结构和数据类型, 其中的每个tensor都是对所有解码时间步对应输出的堆叠。 这些tensor也可能会通过 :code:`decoder.finalize` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize` 返回的初始状态具有相同的结构,其中的tensor也具有相同的形状 和数据类型。
+返回:若 :code:`return_length` 为True,则返回三元组 :code:`(final_outputs, final_states, sequence_lengths)` ,否则返回二元组 :code:`(final_outputs, final_states)` 。 :code:`final_outputs, final_states` 包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.step()` 返回的 :code:`outputs` 相同的结构和数据类型, 且其中的每个tensor都是将所有解码步中与其对应的输出进行堆叠的结果;如果 :code:`decoder` 实现了 :code:`finalize` 方法,这些tensor也可能会通过 :code:`decoder.finalize()` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize()` 返回的初始状态具有相同的结构,形状和数据类型。:code:`sequence_lengths` 是int64类型的tensor,和 :code:`decoder.initialize()` 返回的 :code:`finished` 具有相同的形状,其保存了所有解码序列实际长度。
返回类型:tuple
diff --git a/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst
index 5ef7a469dcd7ca8c881f06e45b2f36c021e736b8..995ac50067ac3dfbf2b0cbbcaaf8ba0a417f8c12 100644
--- a/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dynamic_gru_cn.rst
@@ -3,10 +3,13 @@
dynamic_gru
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.dynamic_gru(input, size, param_attr=None, bias_attr=None, is_reverse=False, gate_activation='sigmoid', candidate_activation='tanh', h_0=None, origin_mode=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用StaticRNN(fluid.layers.** :ref:`cn_api_fluid_layers_StaticRNN` **)。**
diff --git a/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst
index d5635229f2bbdc02c2831f0b4b7a6aed0142dc68..5c6bc406ba23f5d80dcf6586a9b32ead36019b72 100644
--- a/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dynamic_lstm_cn.rst
@@ -3,10 +3,13 @@
dynamic_lstm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.dynamic_lstm(input, size, h_0=None, c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', dtype='float32', name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。
.. note::
diff --git a/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst b/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst
index ce3a45f9fcb681b00a52af17ee2f84ca5b01918e..2306948c00f814e4cee1aa39e819646ab204a91e 100644
--- a/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/dynamic_lstmp_cn.rst
@@ -2,10 +2,13 @@
dynamic_lstmp
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.dynamic_lstmp(input, size, proj_size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', dtype='float32', name=None, h_0=None, c_0=None, cell_clip=None, proj_clip=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
.. note::
在实现的时候为了提升效率,用户必须将输入先进行线性映射,将维度为 [T, hidden_size] 的输入映射为 [T, 4×hidden_size] 输入,然后再传给该OP。
diff --git a/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst b/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst
index 0cf28491a2fa0b0f3bfc27e3d95cc80217c6e5de..5d35868e1336614c624f5565429537bb6698e146 100644
--- a/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/edit_distance_cn.rst
@@ -6,6 +6,12 @@ edit_distance
.. py:function:: paddle.fluid.layers.edit_distance(input,label,normalized=True,ignored_tokens=None, input_length=None, label_length=None)
+:alias_main: paddle.nn.functional.edit_distance
+:alias: paddle.nn.functional.edit_distance,paddle.nn.functional.loss.edit_distance
+:old_api: paddle.fluid.layers.edit_distance
+
+
+
该OP计算一批给定字符串及其参照字符串间的编辑距离。编辑距离也称Levenshtein距离,通过计算从一个字符串变成另一个字符串所需的最少操作步骤来衡量两个字符串的相异度。这里的操作包括插入、删除和替换。
比如给定假设字符串A=“kitten”和参照字符串B=“sitting”,从A变换成B编辑距离为3,至少需要两次替换和一次插入:
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst
index d1312347fc76bfb8054e7a477a69f6e891b9f563..7414eae16ca524e5388de7a09c2e104aa0174570 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_add_cn.rst
@@ -5,6 +5,12 @@ elementwise_add
.. py:function:: paddle.fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None)
+:alias_main: paddle.elementwise_add
+:alias: paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add
+:old_api: paddle.fluid.layers.elementwise_add
+
+
+
该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。
等式为:
@@ -101,7 +107,7 @@ elementwise_add
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+ y = fluid.layers.data(name="y", shape=[5], dtype='float32')
# z = x + y
z = fluid.layers.elementwise_add(x, y, axis=3)
place = fluid.CPUPlace()
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst
index 562fe7d021eb37a199bc897a3bfc74c2dc560930..d4d12f36b20d39247345933090f2c8b2215b14df 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_div_cn.rst
@@ -5,6 +5,12 @@ elementwise_div
.. py:function:: paddle.fluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None)
+:alias_main: paddle.elementwise_div
+:alias: paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div
+:old_api: paddle.fluid.layers.elementwise_div
+
+
+
该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。
等式是:
@@ -101,7 +107,7 @@ elementwise_div
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+ y = fluid.layers.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_div(x, y, axis=3)
# z = x / y
place = fluid.CPUPlace()
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst
index 0b6b3b127e633fc2c8d74db6afa0e0e7467c0aec..bd19626b6dc05d5e7f59571960eb2366ce4dd389 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_floordiv_cn.rst
@@ -5,6 +5,12 @@ elementwise_floordiv
.. py:function:: paddle.fluid.layers.elementwise_floordiv(x, y, axis=-1, act=None, name=None)
+:alias_main: paddle.elementwise_floordiv
+:alias: paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv
+:old_api: paddle.fluid.layers.elementwise_floordiv
+
+
+
该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。
等式为:
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst
index b29f6552b739c9246681f3372e697cd6c4c86c76..b36097fbc71d17711142628ac985e35bdd415f00 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_max_cn.rst
@@ -4,6 +4,12 @@ elementwise_max
-------------------------------
.. py:function:: paddle.fluid.layers.elementwise_max(x, y, axis=-1, act=None, name=None)
+
+:alias_main: paddle.elementwise_max
+:alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max
+:old_api: paddle.fluid.layers.elementwise_max
+
+
该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。
等式是:
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst
index 646dcdc1b83ac74b0d1799bd26edfc392a3b3192..22669884e0343785de9263c9c5769ee24fce4bdf 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_min_cn.rst
@@ -4,6 +4,12 @@ elementwise_min
-------------------------------
.. py:function:: paddle.fluid.layers.elementwise_min(x, y, axis=-1, act=None, name=None)
+
+:alias_main: paddle.elementwise_min
+:alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min
+:old_api: paddle.fluid.layers.elementwise_min
+
+
该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。
等式是:
@@ -60,7 +66,7 @@ elementwise_min
x = fluid.layers.data(name="x", shape=[3], dtype='float32')
y = fluid.layers.data(name="y", shape=[3], dtype='float32')
- z = fluid.layers.elementwise_max(x, y)
+ z = fluid.layers.elementwise_min(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
@@ -84,7 +90,7 @@ elementwise_min
x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
- z = fluid.layers.elementwise_max(x, y, axis=1)
+ z = fluid.layers.elementwise_min(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst
index c83975a358711cd851c0c4035dc72f2f3b4bd963..5e53e8379be388a068554986709826b6db7b0cc1 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_mod_cn.rst
@@ -5,6 +5,12 @@ elementwise_mod
.. py:function:: paddle.fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None)
+:alias_main: paddle.elementwise_mod
+:alias: paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod
+:old_api: paddle.fluid.layers.elementwise_mod
+
+
+
该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。
等式为:
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst
deleted file mode 100644
index 4a0b56f9ef88c577947a5751f06e52cb43300bfb..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/layers_cn/elementwise_mul_cn.rst
+++ /dev/null
@@ -1,117 +0,0 @@
-.. _cn_api_fluid_layers_elementwise_mul:
-
-elementwise_mul
--------------------------------
-
-.. py:function:: paddle.fluid.layers.elementwise_mul(x, y, axis=-1, act=None, name=None)
-
-该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。
-
-等式是:
-
-.. math::
- Out = X \odot Y
-
-- :math:`X` :多维Tensor。
-- :math:`Y` :维度必须小于等于X维度的Tensor。
-
-对于这个运算算子有2种情况:
- 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。
- 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。
-
-对于情况2:
- 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。
- 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。
- 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。
-
-例如:
-
-.. code-block:: text
-
- shape(X) = (2, 3, 4, 5), shape(Y) = (,)
- shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
- shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
- shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
- shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
- shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
-
-参数:
- - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。
- - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。
- - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。
- - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。
- - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。
-
-
-返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。
-
-返回类型: Variable。
-
-**代码示例 1**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import numpy as np
- def gen_data():
- return {
- "x": np.array([2, 3, 4]),
- "y": np.array([1, 5, 2])
- }
- x = fluid.layers.data(name="x", shape=[3], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3], dtype='float32')
- z = fluid.layers.elementwise_mul(x, y)
- # z = x * y
- place = fluid.CPUPlace()
- exe = fluid.Executor(place)
- z_value = exe.run(feed=gen_data(),
- fetch_list=[z.name])
- print(z_value) # [2., 15., 8.]
-
-**代码示例 2**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import numpy as np
- def gen_data():
- return {
- "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
- "y": np.random.randint(1, 5, size=[3, 4]).astype('float32')
- }
- x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
- z = fluid.layers.elementwise_mul(x, y, axis=1)
- # z = x * y
- place = fluid.CPUPlace()
- exe = fluid.Executor(place)
- z_value = exe.run(feed=gen_data(),
- fetch_list=[z.name])
- print(z_value) # z.shape=[2,3,4,5]
-
-**代码示例 3**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- import numpy as np
- def gen_data():
- return {
- "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
- "y": np.random.randint(1, 5, size=[5]).astype('float32')
- }
- x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
- z = fluid.layers.elementwise_mul(x, y, axis=3)
- # z = x * y
- place = fluid.CPUPlace()
- exe = fluid.Executor(place)
- z_value = exe.run(feed=gen_data(),
- fetch_list=[z.name])
- print(z_value) # z.shape=[2,3,4,5]
-
-
-
-
-
-
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst
index cff5ed77cd0b17764e2a19ec629c31027c4c25c4..6f08b313a4a95d7ba76aef597fc07a43fcbbe884 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_pow_cn.rst
@@ -4,6 +4,12 @@ elementwise_pow
-------------------------------
.. py:function:: paddle.fluid.layers.elementwise_pow(x, y, axis=-1, act=None, name=None)
+
+:alias_main: paddle.elementwise_pow
+:alias: paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow
+:old_api: paddle.fluid.layers.elementwise_pow
+
+
该OP逐元素对输入Tensor进行幂操作。
等式是:
diff --git a/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst b/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst
index 97c546b05ff5343a8d45554a9bfadd05a99cad80..c5886ad2e0fa696aad8ae192ec8a0925aa6f1e6b 100644
--- a/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elementwise_sub_cn.rst
@@ -5,6 +5,12 @@ elementwise_sub
.. py:function:: paddle.fluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None)
+:alias_main: paddle.elementwise_sub
+:alias: paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub
+:old_api: paddle.fluid.layers.elementwise_sub
+
+
+
该OP是逐元素相减算子,输入 ``x`` 与输入 ``y`` 逐元素相减,并将各个位置的输出元素保存到返回结果中。
等式是:
@@ -101,7 +107,7 @@ elementwise_sub
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
- y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+ y = fluid.layers.data(name="y", shape=[5], dtype='float32')
z = fluid.layers.elementwise_sub(x, y, axis=3)
# z = x - y
place = fluid.CPUPlace()
diff --git a/doc/fluid/api_cn/layers_cn/elu_cn.rst b/doc/fluid/api_cn/layers_cn/elu_cn.rst
index 3211148097ec928a3291c550f032bca18031eaf5..6d527ce9359d4b2561e7018b72e79839d158d8c2 100644
--- a/doc/fluid/api_cn/layers_cn/elu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/elu_cn.rst
@@ -5,6 +5,12 @@ elu
.. py:function:: paddle.fluid.layers.elu(x, alpha=1.0, name=None)
+:alias_main: paddle.nn.functional.elu
+:alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
+:old_api: paddle.fluid.layers.elu
+
+
+
ELU激活层(ELU Activation Operator)
根据 https://arxiv.org/abs/1511.07289 对输入Tensor中每个元素应用以下计算。
diff --git a/doc/fluid/api_cn/layers_cn/embedding_cn.rst b/doc/fluid/api_cn/layers_cn/embedding_cn.rst
index d494c7f657a6fc9ddb66f324753cb5eb4c7db529..b5ad3607114832a799d763d7d68b05a60d65a55d 100644
--- a/doc/fluid/api_cn/layers_cn/embedding_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/embedding_cn.rst
@@ -3,10 +3,13 @@
embedding
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32')
+:api_attr: 声明式编程模式(静态图)
+
+
+
嵌入层(Embedding Layer)
**注意:此OP将在未来的版本中被移除!该OP要求输入Tensor shape的最后一维必须为1。推荐使用fluid.** :ref:`cn_api_fluid_embedding` 。
@@ -74,6 +77,8 @@ embedding
.. code-block:: python
import paddle.fluid as fluid
+ import numpy as np
+
data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
# 示例 1
diff --git a/doc/fluid/api_cn/layers_cn/equal_cn.rst b/doc/fluid/api_cn/layers_cn/equal_cn.rst
index 68643936258fc67173fb2a55111d128af965104f..9a66e76cedc7d3997fe8e6cbfefca91232f5734b 100644
--- a/doc/fluid/api_cn/layers_cn/equal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/equal_cn.rst
@@ -3,15 +3,16 @@
equal
-------------------------------
-.. py:function:: paddle.fluid.layers.equal(x,y,cond=None)
+.. py:function:: paddle.fluid.layers.equal(x, y, cond=None, name=None)
+
该OP返回 :math:`x==y` 逐元素比较x和y是否相等,x和y的维度应该相同。
参数:
- **x** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64,int32, int64。
- **y** (Variable) - 输入Tensor,支持的数据类型包括 float32, float64, int32, int64。
- - **cond** (Variable,可选) - 逐元素比较的结果Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
- - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。
+ - **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的Tensor,输出Tensor的shape和输入一致,Tensor数据类型为bool。
@@ -23,12 +24,16 @@ equal
import paddle.fluid as fluid
import numpy as np
+
out_cond =fluid.data(name="input1", shape=[2], dtype='bool')
label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
- out0 = fluid.layers.equal(x=label,y=limit) #out1=[True, False]
- out1 = fluid.layers.equal(x=label,y=limit, cond=out_cond) #out2=[True, False] out_cond=[True, False]
- out2 = fluid.layers.equal(x=label,y=limit,force_cpu=False) #out3=[True, False]
- out3 = label == limit # out3=[True, False]
+ label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
+
+ out1 = fluid.layers.equal(x=label,y=limit) #out1=[True, False]
+ out2 = fluid.layers.equal(x=label_cond,y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True]
+
+
+
diff --git a/doc/fluid/api_cn/layers_cn/erf_cn.rst b/doc/fluid/api_cn/layers_cn/erf_cn.rst
index 0c80d4fd54998abd2614a7d946b197c5dcef0e95..c30dc7175303dc3890049ad49cf5ee39505ece2c 100644
--- a/doc/fluid/api_cn/layers_cn/erf_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/erf_cn.rst
@@ -5,6 +5,12 @@ erf
.. py:function:: paddle.fluid.layers.erf(x)
+:alias_main: paddle.erf
+:alias: paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf
+:old_api: paddle.fluid.layers.erf
+
+
+
逐元素计算 Erf 激活函数。更多细节请参考 `Error function `_ 。
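A small sketch of the call in a static-graph program (tensor names and shapes are illustrative assumptions):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[-1, 4], dtype="float32")
    y = fluid.layers.erf(x)  # element-wise Gauss error function

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res, = exe.run(feed={"x": np.random.randn(2, 4).astype("float32")},
                   fetch_list=[y])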
diff --git a/doc/fluid/api_cn/layers_cn/exp_cn.rst b/doc/fluid/api_cn/layers_cn/exp_cn.rst
index 4959d916b95bbe336708baad69a1e8f6298d4ea9..33f053e947410c7be2111cfdb826fdf21295059a 100644
--- a/doc/fluid/api_cn/layers_cn/exp_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/exp_cn.rst
@@ -5,6 +5,12 @@ exp
.. py:function:: paddle.fluid.layers.exp(x, name=None)
+:alias_main: paddle.exp
+:alias: paddle.exp,paddle.tensor.exp,paddle.tensor.math.exp
+:old_api: paddle.fluid.layers.exp
+
+
+
对输入,逐元素进行以自然数e为底指数运算。
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/expand_as_cn.rst b/doc/fluid/api_cn/layers_cn/expand_as_cn.rst
index d207d284274053f454b05fa162cdda9d31bf4f99..3781c7c9343c5dc73c05f778814b9ca8b4b4bb50 100644
--- a/doc/fluid/api_cn/layers_cn/expand_as_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/expand_as_cn.rst
@@ -5,6 +5,12 @@ expand_as
.. py:function:: paddle.fluid.layers.expand_as(x, target_tensor, name=None)
+:alias_main: paddle.expand_as
+:alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
+:old_api: paddle.fluid.layers.expand_as
+
+
+
该OP会根据输入的variable ``target_tensor`` 对输入 ``x`` 的各维度进行广播。通过 ``target_tensor``的维度来为 ``x`` 的每个维度设置广播的次数,使得x 的维度与target_tensor的维度相同。 ``x`` 的秩应小于等于6。注意, ``target_tensor`` 的秩必须与 ``x`` 的秩相同。
注意:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。比如,target_tensor的维度为[2,6,2],x为[2,3,1],则整除后为[1,2,2],x广播后维度为[2,6,2]。如果target_tensor的维度为[2,5,2],第二维5不能整除x的第二维3,则会报错。
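To make the broadcasting rule above concrete, a minimal sketch that reuses the shapes from the note ([2, 3, 1] tiled up to [2, 6, 2]); the tensor names are illustrative assumptions:

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[2, 3, 1], dtype="float32")
    target = fluid.data(name="target", shape=[2, 6, 2], dtype="float32")
    # each dim of target divides evenly by the matching dim of x: [2,6,2]/[2,3,1]=[1,2,2]
    out = fluid.layers.expand_as(x, target_tensor=target)  # out shape: [2, 6, 2]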
diff --git a/doc/fluid/api_cn/layers_cn/expand_cn.rst b/doc/fluid/api_cn/layers_cn/expand_cn.rst
index 22ef8d21ff7b4b2692d6792ff081b895c96783ba..6bd61b1587e60420df20942a8da8c6382a1eef59 100644
--- a/doc/fluid/api_cn/layers_cn/expand_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/expand_cn.rst
@@ -5,6 +5,12 @@ expand
.. py:function:: paddle.fluid.layers.expand(x, expand_times, name=None)
+:alias_main: paddle.expand
+:alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand
+:old_api: paddle.fluid.layers.expand
+
+
+
该OP会根据参数 ``expand_times`` 对输入 ``x`` 的各维度进行复制。通过参数 ``expand_times`` 来为 ``x`` 的每个维度设置复制次数。 ``x`` 的秩应小于等于6。注意, ``expand_times`` 的大小必须与 ``x`` 的秩相同。以下是一个用例:
::
diff --git a/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst b/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst
index 9753731322800bbe72211fb25152cf5167cc7478..edda7f819c0ba67b216aa4c4426e3ebfa0df6ee5 100644
--- a/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/exponential_decay_cn.rst
@@ -5,6 +5,12 @@ exponential_decay
.. py:function:: paddle.fluid.layers.exponential_decay(learning_rate,decay_steps,decay_rate,staircase=False)
+:alias_main: paddle.nn.functional.exponential_decay
+:alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
+:old_api: paddle.fluid.layers.exponential_decay
+
+
+
在学习率上运用指数衰减。
训练模型时,在训练过程中降低学习率。每 ``decay_steps`` 步骤中以 ``decay_rate`` 衰减学习率。
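A minimal sketch of plugging the decayed rate into an optimizer (the concrete numbers are illustrative assumptions):

.. code-block:: python

    import paddle.fluid as fluid

    # start from 0.1 and halve the learning rate every 10000 steps
    lr = fluid.layers.exponential_decay(
        learning_rate=0.1, decay_steps=10000, decay_rate=0.5, staircase=True)
    sgd = fluid.optimizer.SGD(learning_rate=lr)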
diff --git a/doc/fluid/api_cn/layers_cn/eye_cn.rst b/doc/fluid/api_cn/layers_cn/eye_cn.rst
index 3ed5702c446a02f192f5239abf91f99943498106..6044ab6e322580b82e1aa1e830b3c23554506c98 100644
--- a/doc/fluid/api_cn/layers_cn/eye_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/eye_cn.rst
@@ -3,19 +3,20 @@
eye
-------------------------------
-.. py:function:: paddle.fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32')
+.. py:function:: paddle.fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None)
-该OP用来构建单位矩阵,或一个批次的单位矩阵。
+
+该OP用来构建主对角线元素为1、其余元素为0的二维Tensor,或一个批次的此类二维Tensor。
参数:
- - **num_rows** (int) - 每一个批矩阵的行数,数据类型为非负int32。
- - **num_columns** (int) - 每一个批矩阵的列数,数据类型为非负int32。若为None,则默认等于num_rows。
- - **batch_shape** (list(int)) - 如若提供,则返回向量的主批次维度将为batch_shape。
- - **dtype** (string) - 返回张量的数据类型,可为int32,int64,float16,float32,float64。
+ - **num_rows** (int) - 该批次二维Tensor的行数,数据类型为非负int32。
+ - **num_columns** (int, 可选) - 该批次二维Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。
+ - **batch_shape** (list(int), 可选) - 如若提供,则返回Tensor的主批次维度将为batch_shape。
+ - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 返回Tensor的数据类型,可为int32,int64,float16,float32,float64,默认数据类型为float32。
+ - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。
-返回:shape为batch_shape + [num_rows, num_columns]的张量。
+返回: ``shape`` 为batch_shape + [num_rows, num_columns]的Tensor。
-返回类型:Variable(Tensor|LoDTensor)数据类型为int32,int64,float16,float32,float64的Tensor或者LoDTensor。
**代码示例**:
diff --git a/doc/fluid/api_cn/layers_cn/fc_cn.rst b/doc/fluid/api_cn/layers_cn/fc_cn.rst
index 47836281b609abd836280d6336fada0f51ec43bc..6613b2d8879ab55272add049cb8461999f52866c 100644
--- a/doc/fluid/api_cn/layers_cn/fc_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/fc_cn.rst
@@ -3,10 +3,13 @@
fc
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**全连接层**
diff --git a/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst b/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst
deleted file mode 100644
index f465c471411d30bbdd216dd38a4ca1ecf3b4b0c9..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/layers_cn/fill_constant_batch_size_like_cn.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. _cn_api_fluid_layers_fill_constant_batch_size_like:
-
-fill_constant_batch_size_like
--------------------------------
-
-.. py:function:: paddle.fluid.layers.fill_constant_batch_size_like(input,shape,dtype,value,input_dim_idx=0,output_dim_idx=0,force_cpu=False)
-
-该OP创建一个形状为shape并且数据类型为dtype的Tensor,同时用 ``value`` 中提供的常量初始化该Tensor。在输入为LoDTensor并且input_dim_idx为0的
-时候将输出output_dim_idx维度的大小设置为input输入的batch_size的值,创建的Tensor的stop_gradient属性默认为False。
-
-参数:
- - **input** (Variable)- 输入的Tensor或者LoDTensor,支持数据类型为 float32, float64, int32, int64,bool。
- - **shape** (list)- 创建Tensor的shape,最后创建的LoDTensor的shape可能会依据input发生变动。
- - **dtype** (np.dtype|core.VarDesc.VarType|str)- 创建Tensor的数据类型,支持数据类型为 float32, float64, int32, int64,bool。
- - **value** (float|int)- 用于初始化输出Tensor的常量数据的值。
- - **input_dim_idx** (int)- 当值为0并且输入为LoDTensor的时候,创建Tensor的output_dim_idx维度会设置为input的batch_size值,默认值为0。
- - **output_dim_idx** (int) -用于指定创建的Tensor哪个维度设置为输入batch_size的值,默认值为0。
- - **force_cpu** (bool)- 用于返回的Tensor是否创建在CPU上,默认值为False,若设为true,则数据在CPU上。
-
-返回:创建的Tensor, 数据类型为dtype。
-
-返回类型:(Variable)
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
- data = fluid.layers.fill_constant_batch_size_like(
- input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
\ No newline at end of file
diff --git a/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst b/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst
index 867c6420855e6915c35aba0eeda346c998fc8e5a..f7af206495c0640ef87b2806666fdf919015463e 100644
--- a/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/fill_constant_cn.rst
@@ -5,12 +5,18 @@ fill_constant
.. py:function:: paddle.fluid.layers.fill_constant(shape,dtype,value,force_cpu=False,out=None)
+:alias_main: paddle.fill_constant
+:alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant
+:old_api: paddle.fluid.layers.fill_constant
+
+
+
该OP创建一个形状为shape并且数据类型为dtype的Tensor,同时用 ``value`` 中提供的常量初始化该Tensor。
创建的Tensor的stop_gradient属性默认为True。
参数:
- - **shape** (tuple|list)- 创建Tensor的形状。
+ - **shape** (tuple|list|Variable)- 要创建的LoDTensor或者SelectedRows的形状。 数据类型为int32或int64。 如果shape是一个列表或元组,则其元素应该是形状为[1]的整数或Tensor。 如果shape是Variable,则它应该是一维Tensor。
- **dtype** (np.dtype|core.VarDesc.VarType|str)- 创建LoDTensor或者SelectedRows的数据类型,支持数据类型为float16, float32, float64, int32, int64。
- **value** (float|int)- 用于初始化输出LoDTensor或者SelectedRows的常量数据的值。
- **force_cpu** (bool)- 用于标志LoDTensor或者SelectedRows是否创建在CPU上,默认值为False,若设为true,则数据必须在CPU上。
@@ -21,6 +27,10 @@ fill_constant
返回类型:变量(Variable)
+抛出异常:
+ - :code:`TypeError`: dtype必须是bool,float16,float32,float64,int32和int64之一,输出Tensor的数据类型必须与dtype相同。
+ - :code:`TypeError`: 当 `shape` 的数据类型不是list、tuple、Variable。
+
**代码示例**:
.. code-block:: python
@@ -29,3 +39,11 @@ fill_constant
data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') #data1=[[0],[0]]
data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
#data1=[[5],[5]] data2=[[5],[5]]
+
+ # attr shape is a list which contains Variable Tensor.
+ positive_2 = fluid.layers.fill_constant([1], "int32", 2)
+ data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]
+
+ # attr shape is a Variable Tensor.
+ shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
+ data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
diff --git a/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst b/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst
index de172115d9cf33913a8c99296021a59c1be93ec1..212eef72dc7a6b3dc1506270feabc78fcf29c2f1 100644
--- a/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/filter_by_instag_cn.rst
@@ -5,6 +5,12 @@ filter_by_instag
.. py:function:: paddle.fluid.layers.filter_by_instag(ins, ins_tag, filter_tag, is_lod)
+:alias_main: paddle.nn.functional.filter_by_instag
+:alias: paddle.nn.functional.filter_by_instag,paddle.nn.functional.extension.filter_by_instag
+:old_api: paddle.fluid.layers.filter_by_instag
+
+
+
此函数通过instag来过滤ins batch,大量属于同样的tags的样本,我们可以指定我们想要的一些tags,属于这些tags的样本将会被保留在输出中,其余的将会移除。比如,一个batch有4个样本,每个样本都有自己的tag表。
Ins | Ins_Tag |
@@ -26,6 +32,7 @@ Lod为[1,1,1,1],filter tags为[1],从上面的定义中,带有标签
- **ins_tag** (Variable) - 输入变量(LoDTensor),通常为1维列表,通过lod info来分割。
- **filter_tag** (Variable) - 输入变量(1D Tensor/List),通常为持有tags的列表。
- **is_lod** (Bool) – 指定样本是否为lod tensor的布尔值。
+ - **out_val_if_empty** (Int64) - 如果batch内样本被全部过滤,输出会被指定成这个值。
返回:过滤之后的样本(LoDTensor)和 损失权重(Tensor)。
diff --git a/doc/fluid/api_cn/layers_cn/flatten_cn.rst b/doc/fluid/api_cn/layers_cn/flatten_cn.rst
index 2f6e36f79b4311a6b846f9d459761949fda7c296..3e314f655a8cf5b597daeac8507732a9c571d130 100644
--- a/doc/fluid/api_cn/layers_cn/flatten_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/flatten_cn.rst
@@ -5,6 +5,12 @@ flatten
.. py:function:: paddle.fluid.layers.flatten(x, axis=1, name=None)
+:alias_main: paddle.flatten
+:alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten
+:old_api: paddle.fluid.layers.flatten
+
+
+
flatten op将输入的多维Tensor展平成2-D Tensor矩阵
例如:
diff --git a/doc/fluid/api_cn/layers_cn/floor_cn.rst b/doc/fluid/api_cn/layers_cn/floor_cn.rst
index de905921a47b49a5cdd0d374b5303d55b392f34e..e1dc4edb72600bf63fd26555942c75d69678f7f4 100644
--- a/doc/fluid/api_cn/layers_cn/floor_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/floor_cn.rst
@@ -5,6 +5,12 @@ floor
.. py:function:: paddle.fluid.layers.floor(x, name=None)
+:alias_main: paddle.floor
+:alias: paddle.floor,paddle.tensor.floor,paddle.tensor.math.floor
+:old_api: paddle.fluid.layers.floor
+
+
+
向下取整函数。
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst b/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst
index 64b31e83040f91baa74797ab07457f2c205bee16..97937d7c8225b6be736eb2616a7a220ab35f4b7a 100644
--- a/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/fsp_matrix_cn.rst
@@ -5,6 +5,12 @@ fsp_matrix
.. py:function:: paddle.fluid.layers.fsp_matrix(x, y)
+:alias_main: paddle.nn.functional.fsp_matrix
+:alias: paddle.nn.functional.fsp_matrix,paddle.nn.functional.vision.fsp_matrix
+:old_api: paddle.fluid.layers.fsp_matrix
+
+
+
**FSP matrix op**
fsp_matrix op用于计算两个4-D Tensor特征图的求解过程(FSP)矩阵。假设特征图x的形状为 :math:`[x\_channel,h,w]` ,特征图y的形状为 :math:`[y\_channel,h,w]` ,fsp_matrix op分两步得到x和y的fsp矩阵:
diff --git a/doc/fluid/api_cn/layers_cn/gather_cn.rst b/doc/fluid/api_cn/layers_cn/gather_cn.rst
index 5418839fc02bf8f624e1550c56cbc2f11c58608e..9d0caee7b263b87387fcffe9fc24861b8e3950f3 100644
--- a/doc/fluid/api_cn/layers_cn/gather_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gather_cn.rst
@@ -5,7 +5,10 @@ gather
.. py:function:: paddle.fluid.layers.gather(input, index, overwrite=True)
-根据索引 ``index`` 获取输入(input)的最外层维度的条目,并将它们拼接在一起。
+
+
+
+根据索引 ``index`` 获取输入 ``input`` 的最外层维度的条目,并将它们拼接在一起。
.. math::
@@ -26,13 +29,12 @@ gather
参数:
- - **input** (Variable) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。
- - **index** (Variable) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。
+ - **input** (Tensor) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。
+ - **index** (Tensor) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。
- **overwrite** (bool) - 具有相同索引时在反向更新梯度的模式。如果为 ``True`` ,则使用覆盖模式更新相同索引的梯度;如果为 ``False`` ,则使用累积模式更新相同索引的梯度。默认值为 ``True`` 。
返回:和输入的秩相同的输出张量。
-返回类型:Variable
**代码示例**
diff --git a/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst b/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst
index d43cbf3cf152e01542f305f30ee0cae1ba16a99f..8a570ff2e2840215d6d84712e244c192a2344d22 100644
--- a/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gather_nd_cn.rst
@@ -5,6 +5,7 @@ gather_nd
.. py:function:: paddle.fluid.layers.gather_nd(input, index, name=None)
+
该OP是 :code:`gather` 的高维推广,并且支持多轴同时索引。 :code:`index` 是一个K维度的张量,它可以认为是从 :code:`input` 中取K-1维张量,每一个元素是一个切片:
.. math::
@@ -50,18 +51,18 @@ gather_nd
参数:
- - **input** (Variable) - 输入张量,数据类型可以是int32,int64,float32,float64, bool。
- - **index** (Variable) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= input.rank` 。
- - **name** (string) - 该层的名字,默认值为None,表示会自动命名。
+ - **input** (Tensor) - 输入Tensor,数据类型可以是int32,int64,float32,float64, bool。
+ - **index** (Tensor) - 输入的索引Tensor,其数据类型为int32或者int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= input.rank` 。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:shape为index.shape[:-1] + input.shape[index.shape[-1]:]的Tensor|LoDTensor,数据类型与 :code:`input` 一致。
-返回类型:Variable
**代码示例**:
.. code-block:: python
+ import paddle
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[3, 4, 5], dtype='float32')
index = fluid.layers.data(name='index', shape=[2, 2], dtype='int32')
diff --git a/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst b/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst
index af89db7ddad043cfcac2336bd9b4e51a3eb9abd3..0d8354364fcf965fa0e8a1a81d37a188bb8f0f4b 100644
--- a/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gather_tree_cn.rst
@@ -5,6 +5,12 @@ gather_tree
.. py:function:: paddle.fluid.layers.gather_tree(ids, parents)
+:alias_main: paddle.nn.gather_tree
+:alias: paddle.nn.gather_tree,paddle.nn.decode.gather_tree
+:old_api: paddle.fluid.layers.gather_tree
+
+
+
该OP在整个束搜索(Beam Search)结束后使用。在搜索结束后,可以获得每个时间步选择的候选词id及其在搜索树中对应的parent节点, ``ids`` 和 ``parents`` 的形状布局均为 :math:`[max\_time, batch\_size, beam\_size]` ,该OP从最后一个时间步回溯产生完整的id序列。
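A minimal graph-construction sketch; the [max_time, batch_size, beam_size] shape below is a small illustrative assumption:

.. code-block:: python

    import paddle.fluid as fluid

    # layout: [max_time, batch_size, beam_size]
    ids = fluid.data(name="ids", shape=[5, 2, 2], dtype="int64")
    parents = fluid.data(name="parents", shape=[5, 2, 2], dtype="int64")
    final_ids = fluid.layers.gather_tree(ids, parents)  # back-traced id sequences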
diff --git a/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst b/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst
deleted file mode 100644
index bc223787946cf7a111ab2ccf1b3c66c7a4dd7cdc..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/layers_cn/gaussian_random_batch_size_like_cn.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-.. _cn_api_fluid_layers_gaussian_random_batch_size_like:
-
-gaussian_random_batch_size_like
--------------------------------
-
-.. py:function:: paddle.fluid.layers.gaussian_random_batch_size_like(input, shape, input_dim_idx=0, output_dim_idx=0, mean=0.0, std=1.0, seed=0, dtype='float32')
-
-使用高斯随机发生器初始化张量。高斯分布的默认均值(mean)为0,默认标准差(std)为 1 。用户可以通过输入参数设置 mean 和 std 。
-
-参数:
- - **input** (Variable)- 其 input_dim_idx'th 维度指定 batch_size 的张量(Tensor)。
- - **shape** (tuple|list)- 输出的形状。
- - **input_dim_idx** (Int)- (默认值0)输入批量大小维度的索引。
- - **output_dim_idx** (Int)- (默认值0)输出批量大小维度的索引。
- - **mean** (float)- (默认值 0.0)高斯分布的平均值(或中心值)。
- - **std** (float)- (默认值 1.0)高斯分布的标准差(std或spread)。
- - **seed** (int)- (默认值为 0)用于随机数发生器的随机种子。0表示使用系统生成的种子。请注意,如果seed不为0,则此算子每次将始终生成相同的随机数。
- - **dtype** (np.dtype | core.VarDesc.VarType | str)- 输出数据的类型,float32、float_16、int 等。
-
-返回:指定形状的张量,由从高斯分布抽样产生的随机数所填充。
-
-返回类型:Variable
-
-
-
-**代码示例:**
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- input = fluid.layers.data(name="input", shape=[13, 11], dtype='float32')
-
- out = fluid.layers.gaussian_random_batch_size_like(
- input, shape=[-1, 11], mean=1.0, std=2.0)
-
-
-
-
-
-
diff --git a/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst b/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst
index 4ab6da671cfcb437ca0a2dc7ac13963dac70e9e5..059f19be02e3982a43bcec9a3ccbcd25e9bda5fd 100644
--- a/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gaussian_random_cn.rst
@@ -3,27 +3,29 @@
gaussian_random
-------------------------------
-.. py:function:: paddle.fluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32')
+.. py:function:: paddle.fluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32', name=None)
-生成数据符合高斯随机分布的 Tensor。
-参数:
- - **shape** (Tuple[int] | List[int])- 生成 Tensor 的形状。
- - **mean** (float)- 随机 Tensor 的均值,默认值为 0.0。
- - **std** (float)- 随机 Tensor 的标准差,默认值为 1.0。
- - **seed** (int)- 随机数种子,默认值为 0。注:seed 设置为 0 表示使用系统的随机数种子。注意如果 seed 不为 0,则此算子每次将始终生成相同的随机数。
- - **dtype** (np.dtype | core.VarDesc.VarType | str)- 输出 Tensor 的数据类型,可选值为 float32,float64。
-返回:
- - 符合高斯分布的随机 Tensor。形状为 shape,数据类型为 dtype。
+该OP返回数值符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。
-返回类型:
+参数:
+ - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。
+ - **mean** (float|int, 可选) - 输出Tensor的均值,支持的数据类型:float、int。默认值为0.0。
+ - **std** (float|int, 可选) - 输出Tensor的标准差,支持的数据类型:float、int。默认值为1.0。
+ - **seed** (int, 可选) - 随机数种子,默认值为 0。注:seed 设置为 0 表示使用系统的随机数种子。注意如果 seed 不为 0,则此算子每次将始终生成相同的随机数。
+ - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持float32、float64。默认值为float32。
+ - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。
- - Variable
+返回:
+ Tensor:符合高斯随机分布的Tensor,形状为 ``shape``,数据类型为 ``dtype``。
+抛出异常:
+ - ``TypeError`` - 如果 ``shape`` 的类型不是list、tuple、Tensor。
+ - ``TypeError`` - 如果 ``dtype`` 不是float32、float64。
-**代码示例:**
+**代码示例**:
.. code-block:: python
diff --git a/doc/fluid/api_cn/layers_cn/gelu_cn.rst b/doc/fluid/api_cn/layers_cn/gelu_cn.rst
index b3a3d06be47156a2f80e2db288e5239040dc8dcd..c234e3b574b2ee089f6cc39b000f28590ad1630d 100644
--- a/doc/fluid/api_cn/layers_cn/gelu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gelu_cn.rst
@@ -5,14 +5,27 @@ gelu
.. py:function:: paddle.fluid.layers.gelu(x)
+:alias_main: paddle.nn.functional.gelu
+:alias: paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu
+:old_api: paddle.fluid.layers.gelu
+
+
+
逐元素计算 Gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。
+如果使用近似计算:
+
+.. math::
+ out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
+
+如果不使用近似计算:
.. math::
out = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))
参数:
- **x** (Variable) - Gelu Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float32 或 float64。
+ - **approximate** (bool, 可选) - 是否使用近似计算,默认值为 False。
返回:
- 多维 Tensor 或 LoDTensor, 数据类型为 float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。
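A short sketch showing both the exact and the approximate form documented above (tensor name and shape are illustrative assumptions):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[-1, 4], dtype="float32")
    y_erf = fluid.layers.gelu(x)                     # exact, erf-based form
    y_tanh = fluid.layers.gelu(x, approximate=True)  # tanh approximation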
diff --git a/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst b/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst
index 5e28303903e7dc8a8c05fb2f109979bb8560785d..51854d3898e3f968a1f6704b7784edbcc6155755 100644
--- a/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/generate_mask_labels_cn.rst
@@ -5,6 +5,12 @@ generate_mask_labels
.. py:function:: paddle.fluid.layers.generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, labels_int32, num_classes, resolution)
+:alias_main: paddle.nn.functional.generate_mask_labels
+:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
+:old_api: paddle.fluid.layers.generate_mask_labels
+
+
+
**为Mask-RCNN生成mask标签**
对于给定的 RoI (Regions of Interest) 和 输入ground truth的分类标签和分割的坐标标签,采样出前景RoI,并返回其在输入 ``rois`` 中索引位置,并对每个RoI生成 :math:`K*M^{2}` 的二值mask标签。K为类别个数,M是RoI特征图大小。这些输出目标一般用于计算mask分支的损失。
diff --git a/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst b/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst
index 943638a28ab79b8ea65add5f1f5ac50fd1eca6b4..a957d20deaa3b9f05ce21436c49526c760e5094f 100644
--- a/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/generate_proposal_labels_cn.rst
@@ -5,6 +5,12 @@ generate_proposal_labels
.. py:function:: paddle.fluid.layers.generate_proposal_labels(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, batch_size_per_im=256, fg_fraction=0.25, fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0, bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], class_nums=None, use_random=True, is_cls_agnostic=False, is_cascade_rcnn=False)
+:alias_main: paddle.nn.functional.generate_proposal_labels
+:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
+:old_api: paddle.fluid.layers.generate_proposal_labels
+
+
+
**注意:该OP无对应的反向OP**
该OP根据RPN预测产出的bounding boxes和groundtruth,抽取出用来计算loss的foreground boxes and background boxes。
diff --git a/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst b/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst
index 909d969f5e92cc27e54d78a4766ecb44548b9920..7bd143ec306bbeac65430bca235d6dfc7eb32e45 100644
--- a/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/generate_proposals_cn.rst
@@ -5,6 +5,12 @@ generate_proposals
.. py:function:: paddle.fluid.layers.generate_proposals(scores, bbox_deltas, im_info, anchors, variances, pre_nms_top_n=6000, post_nms_top_n=1000, nms_thresh=0.5, min_size=0.1, eta=1.0, name=None)
+:alias_main: paddle.nn.functional.generate_proposals
+:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
+:old_api: paddle.fluid.layers.generate_proposals
+
+
+
该OP根据每个检测框为foreground对象的概率,推选生成用于后续检测网络的RoIs。
其中的检测框根据 ``anchors`` 和 ``bbox_deltas`` 计算得到。
diff --git a/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst b/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst
index 4a8f190d3528acebc7668de960be0e31a1ae2aa2..3a9b9268aa345959d50b69667659b77203777a7c 100644
--- a/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/get_tensor_from_selected_rows_cn.rst
@@ -5,6 +5,9 @@ get_tensor_from_selected_rows
.. py:function:: paddle.fluid.layers.get_tensor_from_selected_rows(x, name=None)
+
+
+
该OP从SelectedRows类型的输入中获取向量数据,以LoDTensor的形式输出。
diff --git a/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst b/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst
index 22f3503105fd6ebc5209855a62c684bf5b68c207..7141718419cc55b0fdb4546dcd16bc89c92a2e35 100644
--- a/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/greater_equal_cn.rst
@@ -3,7 +3,13 @@
greater_equal
-------------------------------
-.. py:function:: paddle.fluid.layers.greater_equal(x, y, cond=None)
+.. py:function:: paddle.fluid.layers.greater_equal(x, y, cond=None, name=None)
+
+:alias_main: paddle.greater_equal
+:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
+:old_api: paddle.fluid.layers.greater_equal
+
+
该OP逐元素地返回 :math:`x >= y` 的逻辑值,使用重载算子 `>=` 可以有相同的计算函数效果。
@@ -12,7 +18,7 @@ greater_equal
- **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape,数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据shape和数据类型需要和输入x一致。默认值为None。
- - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的Tensor,数据的shape和输入x一致。
@@ -25,13 +31,11 @@ greater_equal
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
- label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
- limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
- out_cond =fluid.data(name="input1", shape=[2], dtype='bool')
+ label = layers.assign(np.array([2, 2], dtype='int32'))
+ limit = layers.assign(np.array([2, 3], dtype='int32'))
out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False]
- out1 = fluid.layers.greater_equal(x=label, y=limit, cond=out_cond) #out1=[True, False], out_cond=[True, False]
- out2 = fluid.layers.greater_equal(x=label, y=limit, force_cpu=False) #out2=[True, False]
- out3 = label >= limit #out3=[True, False]
+ out_1 = label >= limit #out_1=[True, False]
+
diff --git a/doc/fluid/api_cn/layers_cn/greater_than_cn.rst b/doc/fluid/api_cn/layers_cn/greater_than_cn.rst
index bbb3fa55058b51393c9b501f8bf84bc952789d91..3f208e21ad5433125b3c22fa76ee06968a7c8153 100644
--- a/doc/fluid/api_cn/layers_cn/greater_than_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/greater_than_cn.rst
@@ -3,7 +3,13 @@
greater_than
-------------------------------
-.. py:function:: paddle.fluid.layers.greater_than(x, y, cond=None)
+.. py:function:: paddle.fluid.layers.greater_than(x, y, cond=None, name=None)
+
+:alias_main: paddle.greater_than
+:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
+:old_api: paddle.fluid.layers.greater_than
+
+
该OP逐元素地返回 :math:`x > y` 的逻辑值,使用重载算子 `>` 可以有相同的计算函数效果。
@@ -11,7 +17,7 @@ greater_than
- **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。
- - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的Tensor,数据的shape和输入x一致。
@@ -24,13 +30,11 @@ greater_than
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
- label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
- limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
- out_cond =fluid.data(name="input1", shape=[2], dtype='bool')
+ label = layers.assign(np.array([2, 3], dtype='int32'))
+ limit = layers.assign(np.array([3, 2], dtype='int32'))
out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True]
- out1 = fluid.layers.greater_than(x=label, y=limit, cond=out_cond) #out1=[False, True], out_cond=[False, True]
- out2 = fluid.layers.greater_than(x=label, y=limit, force_cpu=False) #out2=[False, True]
- out3 = label > limit #out3=[False, True]
+ out1 = label > limit #out1=[False, True]
+
diff --git a/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst b/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst
index cab3d1e5c9cfbe32119e881cb020c87b6ccd0fbd..8871106520eea576edbddf0b606e8113cfd4e85f 100644
--- a/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/grid_sampler_cn.rst
@@ -5,6 +5,12 @@ grid_sampler
.. py:function:: paddle.fluid.layers.grid_sampler(x, grid, name=None)
+:alias_main: paddle.nn.functional.grid_sampler
+:alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler
+:old_api: paddle.fluid.layers.grid_sampler
+
+
+
该OP基于flow field网格对输入X进行双线性插值采样。网格通常由affine_grid生成, shape为[N, H, W, 2],是shape为[N, H, W]的采样点张量的(x, y)坐标。
其中,x坐标是对输入数据X的第四个维度(宽度维度)的索引,y坐标是第三维度(高维度)的索引,最终输出采样值为采样点的4个最接近的角点的双线性插值结果,输出张量的shape为[N, C, H, W]。
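A minimal sketch of the usual affine_grid + grid_sampler pairing described above; batch size and spatial sizes are illustrative assumptions:

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[4, 3, 10, 10], dtype="float32")     # [N, C, H, W]
    theta = fluid.data(name="theta", shape=[4, 2, 3], dtype="float32")  # affine params
    grid = fluid.layers.affine_grid(theta, out_shape=[4, 3, 10, 10])    # [N, H, W, 2]
    out = fluid.layers.grid_sampler(x=x, grid=grid)                     # [N, C, H, W]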
diff --git a/doc/fluid/api_cn/layers_cn/group_norm_cn.rst b/doc/fluid/api_cn/layers_cn/group_norm_cn.rst
index 91b393788ab27255ae086ca3bc693b3f6ad47f2d..049a9e2f0055ca6ce3220c526f25f0abdd335c3d 100755
--- a/doc/fluid/api_cn/layers_cn/group_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/group_norm_cn.rst
@@ -3,10 +3,13 @@
group_norm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
参考论文: `Group Normalization `_
参数:
diff --git a/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst b/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst
index d4a531b6b10055418ad985f9531fec1f9d5ef225..679e5dfe42bb6c4a3baad252af83d88c3ff737a7 100644
--- a/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/gru_unit_cn.rst
@@ -3,10 +3,13 @@
gru_unit
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.gru_unit(input, hidden, size, param_attr=None, bias_attr=None, activation='tanh', gate_activation='sigmoid', origin_mode=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
Gated Recurrent Unit(GRU)循环神经网络计算单元。该OP用于完成单个时间步内GRU的计算,支持以下两种计算方式:
如果origin_mode为True,则使用的运算公式来自论文
diff --git a/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst b/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst
index 4756407f8128d1b32743eff55ba347b523e6e679..d139a6a8fe52833cfa1e682bdcc7940d613b2270 100644
--- a/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/hard_shrink_cn.rst
@@ -5,6 +5,12 @@ hard_shrink
.. py:function:: paddle.fluid.layers.hard_shrink(x,threshold=None)
+:alias_main: paddle.nn.functional.hard_shrink
+:alias: paddle.nn.functional.hard_shrink,paddle.nn.functional.activation.hard_shrink
+:old_api: paddle.fluid.layers.hard_shrink
+
+
+
HardShrink激活函数(HardShrink activation operator)
diff --git a/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst
index ef14f86f6c9eef9ce1f8f5e095fccd524313508f..f015a799c57376ee406cbac16d68d8a98b72dda4 100644
--- a/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/hard_sigmoid_cn.rst
@@ -5,6 +5,12 @@ hard_sigmoid
.. py:function:: paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None)
+:alias_main: paddle.nn.functional.hard_sigmoid
+:alias: paddle.nn.functional.hard_sigmoid,paddle.nn.functional.activation.hard_sigmoid
+:old_api: paddle.fluid.layers.hard_sigmoid
+
+
+
sigmoid的分段线性逼近激活函数,速度比sigmoid快,详细解释参见 https://arxiv.org/abs/1603.00391。
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst b/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst
index 0319c855872f0b537d2bf8f66aee912dfa41ce7e..8118ea27a2f2c6f1855e02cd8451a1de6d11151a 100644
--- a/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/hard_swish_cn.rst
@@ -5,6 +5,12 @@ hard_swish
.. py:function:: paddle.fluid.layers.hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None)
+:alias_main: paddle.nn.functional.hard_swish
+:alias: paddle.nn.functional.hard_swish,paddle.nn.functional.activation.hard_swish
+:old_api: paddle.fluid.layers.hard_swish
+
+
+
该OP实现了hard_swish激活函数。hard_swish激活函数在MobileNetV3架构中被提出,相较于swish函数,具有数值稳定性好,计算速度快等优点,具体原理请参考: https://arxiv.org/pdf/1905.02244.pdf
:math:`out = \frac{x * (min(max(0, x+offset), threshold))}{scale}`
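A small numeric sketch of the formula above with the default threshold/scale/offset (the input values are illustrative assumptions):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.data(name="x", shape=[-1, 3], dtype="float32")
    y = fluid.layers.hard_swish(x)  # threshold=6.0, scale=6.0, offset=3.0

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"x": np.array([[-4.0, 0.0, 4.0]], dtype="float32")},
                   fetch_list=[y])
    # -4 * min(max(0, -1), 6) / 6 = 0,  0 * 3 / 6 = 0,  4 * 6 / 6 = 4
    print(out)  # approximately [[0., 0., 4.]]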
diff --git a/doc/fluid/api_cn/layers_cn/has_inf_cn.rst b/doc/fluid/api_cn/layers_cn/has_inf_cn.rst
index 5383e047d8582dd53525c5ef8602c45beac0fe02..8bf1cb7b8bfcdb7e1547f75566f0b641d745baf6 100644
--- a/doc/fluid/api_cn/layers_cn/has_inf_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/has_inf_cn.rst
@@ -5,6 +5,12 @@ has_inf
.. py:function:: paddle.fluid.layers.has_inf(x)
+:alias_main: paddle.has_inf
+:alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf
+:old_api: paddle.fluid.layers.has_inf
+
+
+
检查输入的变量(x)中是否包含无穷数(inf)。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/has_nan_cn.rst b/doc/fluid/api_cn/layers_cn/has_nan_cn.rst
index c939c816edf2a830e495d5f0515c888c47ba7469..0f66985af75a733ba985155c6fd06255ce46b8cc 100644
--- a/doc/fluid/api_cn/layers_cn/has_nan_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/has_nan_cn.rst
@@ -5,6 +5,12 @@ has_nan
.. py:function:: paddle.fluid.layers.has_nan(x)
+:alias_main: paddle.has_nan
+:alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan
+:old_api: paddle.fluid.layers.has_nan
+
+
+
检查输入的变量(x)中是否包含NAN。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/hash_cn.rst b/doc/fluid/api_cn/layers_cn/hash_cn.rst
index 8f6b1f147ba53888d99dc6153fa0be9ceca61602..a25828a10dc67b9697541d6e83234272ffbbb81a 100644
--- a/doc/fluid/api_cn/layers_cn/hash_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/hash_cn.rst
@@ -5,6 +5,12 @@ hash
.. py:function:: paddle.fluid.layers.hash(input, hash_size, num_hash=1, name=None)
+:alias_main: paddle.nn.functional.hash
+:alias: paddle.nn.functional.hash,paddle.nn.functional.lod.hash
+:old_api: paddle.fluid.layers.hash
+
+
+
该OP将输入 hash 成为一个整数,该数的值小于给定的 ``hash_size`` 。**仅支持输入为LoDTensor**。
该OP使用的哈希算法是:xxHash - `Extremely fast hash algorithm `_
@@ -30,8 +36,8 @@ hash
place = fluid.core.CPUPlace()
# 构建网络
- x = fluid.layers.data(name="x", shape=[1], dtype="int32", lod_level=1)
- res = fluid.layers.hash(name="res",input=x, hash_size=1000, num_hash=4)
+ x = fluid.data(name="x", shape=[2, 2], dtype="int32", lod_level=1)
+ res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4)
# 创建CPU执行器
exe = fluid.Executor(place)
@@ -39,9 +45,7 @@ hash
in1 = np.array([[1,2],[3,4]]).astype("int32")
print(in1)
- x_i = fluid.core.LoDTensor()
- x_i.set(in1,place)
- x_i.set_recursive_sequence_lengths([[0,2]])
+ x_i = fluid.create_lod_tensor(in1, [[0, 2]], place)
res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
print(np.array(res[0]))
# [[[722]
diff --git a/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst
index 2c3656e21c7600227c0046ee3483d05667bb862c..fa8a9704fe470bea9ceef5c53506ece75b80a44d 100644
--- a/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/hsigmoid_cn.rst
@@ -3,10 +3,13 @@
hsigmoid
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None, name=None, path_table=None, path_code=None, is_custom=False, is_sparse=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。
该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。
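A minimal sketch of using hsigmoid with the default (internally built) binary tree; the feature width, class count and tensor names are illustrative assumptions:

.. code-block:: python

    import paddle.fluid as fluid

    emb = fluid.data(name="emb", shape=[-1, 128], dtype="float32")   # per-sample features
    label = fluid.data(name="label", shape=[-1, 1], dtype="int64")   # target class/word id
    cost = fluid.layers.hsigmoid(input=emb, label=label, num_classes=1000)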
diff --git a/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst b/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst
index 62729ffd2114492329bfcbfa0a5bd2a83d769c92..753e38949351d8d6001d3e6df6799a8c27fa68bf 100644
--- a/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/huber_loss_cn.rst
@@ -5,6 +5,12 @@ huber_loss
.. py:function:: paddle.fluid.layers.huber_loss(input, label, delta)
+:alias_main: paddle.nn.functional.huber_loss
+:alias: paddle.nn.functional.huber_loss,paddle.nn.functional.loss.huber_loss
+:old_api: paddle.fluid.layers.huber_loss
+
+
+
该OP计算输入(input)与标签(label)之间的Huber损失。Huber损失是常用的回归损失之一,相较于平方误差损失,Huber损失减小了对异常点的敏感度,更具鲁棒性。
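A minimal sketch; the names, shapes and delta below are illustrative assumptions:

.. code-block:: python

    import paddle.fluid as fluid

    pred = fluid.data(name="pred", shape=[-1, 1], dtype="float32")
    label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
    # quadratic when |pred - label| <= delta, linear (less outlier-sensitive) otherwise
    loss = fluid.layers.huber_loss(input=pred, label=label, delta=1.0)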
diff --git a/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst b/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst
index e60f9f7d6c49087f4f38b70bedb6c7ca920f0d02..88ca3cead90ac752adb9b1326104c0db557c216d 100644
--- a/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/im2sequence_cn.rst
@@ -3,10 +3,13 @@
im2sequence
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.im2sequence(input, filter_size=1, stride=1, padding=0, input_image_size=None, out_stride=1, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP使用 `filter` 扫描输入的Tensor并将输入Tensor转换成序列,返回值的 `shape={input.batch_size * output_height * output_width, filter_size_height* filter_size_width * input.channels}` 。返回值的timestep的个数为 `output_height * output_width` , 每个timestep的维度是 `filter_size_height* filter_size_width * input.channels` 。其中 `output_height` 和 `output_width` 由以下式计算:
diff --git a/doc/fluid/api_cn/layers_cn/image_resize_cn.rst b/doc/fluid/api_cn/layers_cn/image_resize_cn.rst
index ed44ceae87b6c27189a2284e9182c9f10409ac48..e0331de812743e569839bb9aa831b4667f0ce6a3 100644
--- a/doc/fluid/api_cn/layers_cn/image_resize_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/image_resize_cn.rst
@@ -5,6 +5,12 @@ image_resize
.. py:function:: paddle.fluid.layers.image_resize(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW')
+:alias_main: paddle.nn.functional.image_resize
+:alias: paddle.nn.functional.image_resize,paddle.nn.functional.vision.image_resize
+:old_api: paddle.fluid.layers.image_resize
+
+
+
**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。
该OP用于调整一个batch中图片的大小。
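A minimal sketch using out_shape (the recommended argument, since actual_shape is being deprecated); the feature-map shape is an illustrative assumption:

.. code-block:: python

    import paddle.fluid as fluid

    feat = fluid.data(name="feat", shape=[None, 3, 6, 10], dtype="float32")  # NCHW
    resized = fluid.layers.image_resize(feat, out_shape=[12, 12], resample="BILINEAR")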
diff --git a/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst b/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst
index 438261d30c8b6d537de37cc8a70812ac17c67285..93c8a9583d9329ef27606404b477dacc55bca21d 100644
--- a/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/image_resize_short_cn.rst
@@ -5,6 +5,12 @@ image_resize_short
.. py:function:: paddle.fluid.layers.image_resize_short(input, out_short_len, resample='BILINEAR')
+:alias_main: paddle.nn.functional.image_resize_short
+:alias: paddle.nn.functional.image_resize_short,paddle.nn.functional.vision.image_resize_short
+:old_api: paddle.fluid.layers.image_resize_short
+
+
+
该OP用于调整一批图片的大小。输入图像的短边将被调整为给定的out_short_len 。输入图像的长边按比例调整大小,最终图像的长宽比保持不变。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/increment_cn.rst b/doc/fluid/api_cn/layers_cn/increment_cn.rst
index 4a5e20b2151c943ec6261d8235244d3b9e6703bc..8edb5db4ab6fb7c7494c40b7bb9e4f2ebac5e04a 100644
--- a/doc/fluid/api_cn/layers_cn/increment_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/increment_cn.rst
@@ -5,6 +5,12 @@ increment
.. py:function:: paddle.fluid.layers.increment(x, value=1.0, in_place=True)
+:alias_main: paddle.increment
+:alias: paddle.increment,paddle.tensor.increment,paddle.tensor.math.increment
+:old_api: paddle.fluid.layers.increment
+
+
+
使输入Tensor ``x`` 的数据累加 ``value`` , 该OP通常用于循环次数的计数。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst b/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst
new file mode 100755
index 0000000000000000000000000000000000000000..11077c5b78fe34fb0387a9d2dcb7bfd3f73c20b0
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/inplace_abn_cn.rst
@@ -0,0 +1,43 @@
+.. _cn_api_fluid_layers_inplace_abn:
+
+inplace_abn
+-------------------------------
+
+**注意:该API仅支持【静态图】模式**
+
+.. py:function:: paddle.fluid.layers.inplace_abn(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False, act_alpha=1.0)
+
+就地批正则化激活层(Inplace Activation Batch Normalization Layer)
+
+此层通过就地(in-place)内存计算批正则化和激活以节省内存。有关批正则化的计算,请参见 ``fluid.layers.batch_norm`` ;有关就地激活批正则化的计算,请参考 `In-Place Activated BatchNorm for Memory-Optimized Training of DNNs `_。
+
+参数:
+ - **input** (Variable) - inplace_abn算子的输入特征,是一个Variable类型,输入维度可以是 2, 3, 4, 5。数据类型:float16, float32, float64。
+ - **act** (string)- 激活函数类型,可以是leaky_relu、relu、prelu等。默认:None。
+ - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。
+ - **momentum** (float|Variable)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。更新公式为: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` , 默认:0.9。
+ - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。
+ - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的权重初始化是1.0。
+ - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。inplace_abn算子默认的偏置初始化是0.0。
+ - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。
+ - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
+ - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``inplace_abn`` 将随机命名全局均值;否则, ``inplace_abn`` 将命名全局均值为 ``moving_mean_name`` 。默认:None。
+ - **moving_variance_name** (string)- moving_variance的名称,存储全局方差。如果将其设置为None, ``inplace_abn`` 将随机命名全局方差;否则, ``inplace_abn`` 将命名全局方差为 ``moving_variance_name`` 。默认:None。
+ - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。
+ - **use_global_stats** (bool) – 是否使用全局均值和方差。在预测或测试模式下,将use_global_stats设置为True或将is_test设置为True,两者行为等效。在训练模式中,当设置use_global_stats为True时,在训练期间也将使用全局均值和方差。默认:False。
+ - **act_alpha** (float) – 当 ``act`` 参数为None、leaky-relu、elu时,会使用就地批正则化激活算法,可通过此参数给定leaky-relu、elu的 ``alpha`` 值。默认:1.0。
+
+
+返回: 维度和输入相同的Tensor,在输入中运用批正则后的结果。
+
+返回类型:Variable
+
+**代码示例**:
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+ hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
+ hidden2 = fluid.layers.inplace_abn(input=hidden1)
+ hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)
diff --git a/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst b/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst
index d2e819c7a2f916259747633b822d5cfc68e28762..a10ff93900023ad41c2b7e6461764f060eef3293 100644
--- a/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/instance_norm_cn.rst
@@ -3,10 +3,13 @@
instance_norm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.instance_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下:
diff --git a/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst b/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst
index 6f98b6de183a63904ce167cc56ba99ca0df01a3a..1b7abf3fab067769c21faf730179f8fc88e32785 100644
--- a/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/inverse_time_decay_cn.rst
@@ -5,6 +5,12 @@ inverse_time_decay
.. py:function:: paddle.fluid.layers.inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False)
+:alias_main: paddle.nn.functional.inverse_time_decay
+:alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
+:old_api: paddle.fluid.layers.inverse_time_decay
+
+
+
在初始学习率上运用逆时衰减。
训练模型时,最好在训练过程中降低学习率。通过执行该函数,将对初始学习率运用逆时衰减函数。
diff --git a/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst b/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst
index 4bad4777767080615baa1a51022124bfc6fff5f0..befe69f841dcf40fdf4a70e34a91d59673af978d 100644
--- a/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/iou_similarity_cn.rst
@@ -5,6 +5,12 @@ iou_similarity
.. py:function:: paddle.fluid.layers.iou_similarity(x, y, box_normalized=True, name=None)
+:alias_main: paddle.nn.functional.iou_similarity
+:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
+:old_api: paddle.fluid.layers.iou_similarity
+
+
+
**IOU Similarity Operator**
计算两个框列表的intersection-over-union(IOU)。框列表 :math:`X` 应为LoDTensor, :math:`Y` 是普通张量, :math:`X` 成批输入的所有实例共享 :math:`Y` 中的框。给定框A和框B,IOU的运算如下:
diff --git a/doc/fluid/api_cn/layers_cn/is_empty_cn.rst b/doc/fluid/api_cn/layers_cn/is_empty_cn.rst
index ca6e45c94ca1cf7a5d15db29756299ec89c8f102..90d4eb57a74fc3d0975f29c058236e4ed2c0f7ee 100644
--- a/doc/fluid/api_cn/layers_cn/is_empty_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/is_empty_cn.rst
@@ -5,6 +5,12 @@ is_empty
.. py:function:: paddle.fluid.layers.is_empty(x, cond=None)
+:alias_main: paddle.is_empty
+:alias: paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty
+:old_api: paddle.fluid.layers.is_empty
+
+
+
测试变量是否为空
参数:
diff --git a/doc/fluid/api_cn/layers_cn/isfinite_cn.rst b/doc/fluid/api_cn/layers_cn/isfinite_cn.rst
index 4b7c8672a85ec977913c08c551038c2f84ed5966..c9b5474e809b248141ad4632e5a5aedd1e11fec9 100644
--- a/doc/fluid/api_cn/layers_cn/isfinite_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/isfinite_cn.rst
@@ -5,6 +5,12 @@ isfinite
.. py:function:: paddle.fluid.layers.isfinite(x)
+:alias_main: paddle.isfinite
+:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
+:old_api: paddle.fluid.layers.isfinite
+
+
+
``注意:此算子的输入 Tensor / LoDTensor 数据类型必须为 int32 / float / double 之一。``
测试 x 是否包含无穷值(即 nan 或 inf)。若元素均为有穷数,返回真;否则返回假。
diff --git a/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst b/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst
index 506199f3f85915ccdb25244b37db5a65a3b24581..8fc92afbd9748fe5499eb8e8aa0e9add97d24d8b 100644
--- a/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/kldiv_loss_cn.rst
@@ -5,6 +5,12 @@ kldiv_loss
.. py:function:: paddle.fluid.layers.kldiv_loss(x, target, reduction='mean', name=None)
+:alias_main: paddle.nn.functional.kldiv_loss
+:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss
+:old_api: paddle.fluid.layers.kldiv_loss
+
+
+
该OP计算输入(X)和输入(Target)之间的Kullback-Leibler散度损失。注意其中输入(X)应为对数概率值,输入(Target)应为概率值。
kL发散损失计算如下:
diff --git a/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst b/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst
index f87458a79eef2d265c8296202ccf51818a1f1fca..f0e50f6af88923236a3ab8661d70ec10fe29ccc0 100644
--- a/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/l2_normalize_cn.rst
@@ -5,6 +5,12 @@ l2_normalize
.. py:function:: paddle.fluid.layers.l2_normalize(x,axis,epsilon=1e-12,name=None)
+:alias_main: paddle.nn.functional.l2_normalize
+:alias: paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize
+:old_api: paddle.fluid.layers.l2_normalize
+
+
+
该OP计算欧几里得距离之和对x进行归一化。对于1-D张量(系数矩阵的维度固定为0)
计算公式如下:
diff --git a/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst b/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst
index 488adba84e63c8595f682f1f84227b5cfd2ff608..62292e20d0bbe1245f67a0f58f57a5941f5a9911 100644
--- a/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/label_smooth_cn.rst
@@ -5,6 +5,12 @@ label_smooth
.. py:function:: paddle.fluid.layers.label_smooth(label, prior_dist=None, epsilon=0.1, dtype='float32', name=None)
+:alias_main: paddle.nn.functional.label_smooth
+:alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth
+:old_api: paddle.fluid.layers.label_smooth
+
+
+
该OP实现了标签平滑的功能。标签平滑是一种对分类器层进行正则化的机制,称为标签平滑正则化(LSR)。由于直接优化正确标签的对数似然可能会导致过拟合,降低模型的适应能力,因此提出了标签平滑的方法来降低模型置信度。
标签平滑使用标签 :math:`y` 和一些固定模式随机分布变量 :math:`\mu` 。对 :math:`k` 标签,标签平滑的计算方式如下。
diff --git a/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst b/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst
index 3d49372f85e9ac22a480b803fb920c9ddf12e6ee..477b6e6ba82ebca08e62080ee79c7cb788fecbc0 100644
--- a/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/layer_norm_cn.rst
@@ -3,10 +3,13 @@
layer_norm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_
计算公式如下
diff --git a/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst b/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst
index 635decb48d980f93b448c0c638a5b44bf261ad43..736a39e0dfded848089f7511031addc5cf71dfe4 100644
--- a/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/leaky_relu_cn.rst
@@ -5,6 +5,12 @@ leaky_relu
.. py:function:: paddle.fluid.layers.leaky_relu(x, alpha=0.02, name=None)
+:alias_main: paddle.nn.functional.leaky_relu
+:alias: paddle.nn.functional.leaky_relu,paddle.nn.functional.activation.leaky_relu
+:old_api: paddle.fluid.layers.leaky_relu
+
+
+
LeakyRelu激活函数
.. math:: out=max(x,α∗x)
diff --git a/doc/fluid/api_cn/layers_cn/less_equal_cn.rst b/doc/fluid/api_cn/layers_cn/less_equal_cn.rst
index dae49dfbb8ff11a5d9336e4f0802972957b21e7e..da8b1b83343ec06b21738177555c79855efbdb2f 100644
--- a/doc/fluid/api_cn/layers_cn/less_equal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/less_equal_cn.rst
@@ -3,7 +3,13 @@
less_equal
-------------------------------
-.. py:function:: paddle.fluid.layers.less_equal(x, y, cond=None)
+.. py:function:: paddle.fluid.layers.less_equal(x, y, cond=None, name=None)
+
+:alias_main: paddle.less_equal
+:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
+:old_api: paddle.fluid.layers.less_equal
+
+
该OP逐元素地返回 :math:`x <= y` 的逻辑值,使用重载算子 `<=` 可以有相同的计算函数效果。
@@ -11,7 +17,7 @@ less_equal
- **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。
- - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的Tensor,数据的shape和输入x一致。
@@ -24,13 +30,11 @@ less_equal
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
- label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
- limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
- out_cond =fluid.data(name="input1", shape=[2], dtype='bool')
+ label = layers.assign(np.array([1, 3], dtype='int32'))
+ limit = layers.assign(np.array([1, 2], dtype='int32'))
out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False]
- out1 = fluid.layers.less_equal(x=label, y=limit, cond=out_cond) #out1=[True, False], out_cond=[True, False]
- out2 = fluid.layers.less_equal(x=label, y=limit, force_cpu=False) #out2=[True, False]
- out3 = label<= limit #out3=[True, False]
+ out1 = label <= limit #out1=[True, False]
+
diff --git a/doc/fluid/api_cn/layers_cn/less_than_cn.rst b/doc/fluid/api_cn/layers_cn/less_than_cn.rst
index 7d32d6d3f49d52a4bae11925035d1f34edd56014..6ad37577315a293779e5b1da1a9e449179c9b52f 100644
--- a/doc/fluid/api_cn/layers_cn/less_than_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/less_than_cn.rst
@@ -3,7 +3,13 @@
less_than
-------------------------------
-.. py:function:: paddle.fluid.layers.less_than(x, y, force_cpu=None, cond=None)
+.. py:function:: paddle.fluid.layers.less_than(x, y, force_cpu=None, cond=None, name=None)
+
+:alias_main: paddle.less_than
+:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
+:old_api: paddle.fluid.layers.less_than
+
+
该OP逐元素地返回 :math:`x < y` 的逻辑值,使用重载算子 `<` 可以有相同的计算函数效果
@@ -14,6 +20,7 @@ less_than
- **y** (Variable) - 进行比较的第二个输入,是一个多维的LoDTensor/Tensor,数据类型可以是float32,float64,int32,int64。
- **force_cpu** (bool) – 如果为True则强制将输出变量写入CPU内存中,否则将其写入目前所在的运算设备上。默认值为False。注意:该属性已弃用,其值始终是False。
- **cond** (Variable,可选) – 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的LoDTensor/Tensor,数据的shape和输入x一致。
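
下面补充一个简要的用法示意(示意性写法,仅用于说明按元素比较 :math:`x < y` 的行为,基于默认的静态图环境):

.. code-block:: python

    import paddle.fluid as fluid
    import numpy as np

    # 构造两个 int32 Tensor,并按元素比较
    label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
    limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
    out = fluid.layers.less_than(x=label, y=limit)  # out=[True, False]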
diff --git a/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst b/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst
index 90aebef0fb8a866a5ec1c69840b34723232e4db9..cf38de480033e68c8388e08990a991cf5792bfef 100755
--- a/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/linear_chain_crf_cn.rst
@@ -3,10 +3,13 @@
linear_chain_crf
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.linear_chain_crf(input, label, param_attr=None, length=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
线性链条件随机场(Linear Chain CRF)
条件随机场定义间接概率图,节点代表随机变量,边代表两个变量之间的依赖。CRF学习条件概率 :math:`P\left ( Y|X \right )` , :math:`X = \left ( x_{1},x_{2},...,x_{n} \right )` 是结构性输入,:math:`Y = \left ( y_{1},y_{2},...,y_{n} \right )` 为输入标签。
diff --git a/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst b/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst
index 59c281d419ad313e08a909b0cd31944ad77407e5..c480c68d074b01191296b96e8ec8d506fbfa437f 100644
--- a/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/linear_lr_warmup_cn.rst
@@ -5,6 +5,12 @@ linear_lr_warmup
.. py:function:: paddle.fluid.layers.linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr)
+:alias_main: paddle.nn.functional.linear_lr_warmup
+:alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
+:old_api: paddle.fluid.layers.linear_lr_warmup
+
+
+
该OP使用学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率,具体原理可参考: `Bag of Tricks for Image Classification with Convolutional Neural Networks `_
diff --git a/doc/fluid/api_cn/layers_cn/linspace_cn.rst b/doc/fluid/api_cn/layers_cn/linspace_cn.rst
index b1f7d2649919a4c00f0a3d891fa9a8b992c3004a..c301233b746178e17db08dacbf927d0b0b5a89f7 100644
--- a/doc/fluid/api_cn/layers_cn/linspace_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/linspace_cn.rst
@@ -3,19 +3,20 @@
linspace
-------------------------------
-.. py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype)
+.. py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype=None, name=None)
-该OP在给定区间内返回固定数目的均匀间隔的值。
+该OP返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。
+**注意:该OP不进行梯度计算**
参数:
- - **start** (float|Variable) – start是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。
- - **stop** (float|Variable) – end是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。
- - **num** (int|Variable) – num是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。
- - **dtype** (string) – 输出Tensor的数据类型,可以是‘float32’或者是‘float64’。
+ - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32 或者int64。
+ - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32或者int64。
+ - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。
+ - **dtype** (np.dtype|str, 可选) – 输出Tensor的数据类型,可以是float32,float64, int32或者int64。如果dtype的数据类型为None,输出Tensor数据类型为float32。
+ - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在num为1的情况下,仅返回包含start元素值的Tensor。
-返回类型:Variable
**代码示例**:
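
以下给出一个简要的示意用法(示意性示例,假设默认的静态图环境,生成 0 到 10 之间 5 个等间隔的值):

.. code-block:: python

    import paddle.fluid as fluid

    # 生成 [0, 10] 区间上 5 个等间隔的值:[0., 2.5, 5., 7.5, 10.]
    data = fluid.layers.linspace(0, 10, 5, dtype='float32')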
diff --git a/doc/fluid/api_cn/layers_cn/load_cn.rst b/doc/fluid/api_cn/layers_cn/load_cn.rst
index f3e39758fd05e0824a225c7874bc870f34eaf93d..24760a5325296f9f0b568d2e7f1e4da6d2c35308 100644
--- a/doc/fluid/api_cn/layers_cn/load_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/load_cn.rst
@@ -5,6 +5,9 @@ load
.. py:function:: paddle.fluid.layers.load(out, file_path, load_as_fp16=None)
+
+
+
该OP操作将从磁盘文件中加载LoDTensor/SelectedRows变量。
diff --git a/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst b/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst
index 33a8d45038904f9c2ea9f5f090d7695ae2b53b76..f85aead2e4ef836512851fac9e34df172dea3550 100644
--- a/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/locality_aware_nms_cn.rst
@@ -5,6 +5,9 @@ locality_aware_nms
.. py:function:: paddle.fluid.layers.locality_aware_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=-1, name=None)
+
+
+
**局部感知NMS**
`局部感知NMS `_ 用于对边界框(bounding box)和评分(scores)执行局部感知非极大值抑制(LANMS)。
diff --git a/doc/fluid/api_cn/layers_cn/lod_append_cn.rst b/doc/fluid/api_cn/layers_cn/lod_append_cn.rst
index ac96e2db264c64b5de324e7d72b11943fea1bac1..2c7f754233a198fdcdc39d4fcb453ad56c1a81d6 100644
--- a/doc/fluid/api_cn/layers_cn/lod_append_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/lod_append_cn.rst
@@ -5,6 +5,9 @@ lod_append
.. py:function:: paddle.fluid.layers.lod_append(x, level)
+
+
+
给 ``x`` 的LoD添加 ``level`` 。
简单示例:
diff --git a/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst b/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst
index 20d9ea8b4fe46e4772e12aed7187eca00a138fdd..fbb090b26751d1a372911a7d35cf4425a2048e76 100644
--- a/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/lod_reset_cn.rst
@@ -5,6 +5,9 @@ lod_reset
.. py:function:: paddle.fluid.layers.lod_reset(x, y=None, target_lod=None)
+
+
+
根据给定的参数 ``y`` 或 ``target_lod`` ,重设输入 ``x`` (LoDTensor) 的 LoD 信息。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/log_cn.rst b/doc/fluid/api_cn/layers_cn/log_cn.rst
index 121761713770df69bf941ebf44c1521e9b4edc23..dc8f40a8081d7a7aa2fa6880761031c333edd88d 100644
--- a/doc/fluid/api_cn/layers_cn/log_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/log_cn.rst
@@ -5,6 +5,12 @@ log
.. py:function:: paddle.fluid.layers.log(x, name=None)
+:alias_main: paddle.log
+:alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log
+:old_api: paddle.fluid.layers.log
+
+
+
Log激活函数(计算自然对数)
diff --git a/doc/fluid/api_cn/layers_cn/log_loss_cn.rst b/doc/fluid/api_cn/layers_cn/log_loss_cn.rst
index 3134ba93f444bdcfd0e9b5f439860cb77b204642..aea52ab5f87ec3c61174a6516f10a650037007e5 100644
--- a/doc/fluid/api_cn/layers_cn/log_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/log_loss_cn.rst
@@ -5,6 +5,12 @@ log_loss
.. py:function:: paddle.fluid.layers.log_loss(input, label, epsilon=0.0001, name=None)
+:alias_main: paddle.nn.functional.log_loss
+:alias: paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss
+:old_api: paddle.fluid.layers.log_loss
+
+
+
**负log loss层**
该 OP 对输入的预测结果和目标标签进行计算,返回负对数损失值。
diff --git a/doc/fluid/api_cn/layers_cn/logical_and_cn.rst b/doc/fluid/api_cn/layers_cn/logical_and_cn.rst
index b5bdc229aeb90248c6de565941808e7c8ba20dd3..09e3efbcbcac121b94fc557a2ccee27357bfac80 100644
--- a/doc/fluid/api_cn/layers_cn/logical_and_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/logical_and_cn.rst
@@ -3,46 +3,35 @@
logical_and
-------------------------------
-.. py:function:: paddle.fluid.layers.logical_and(x, y, out=None, name=None)
+.. py:function:: paddle.logical_and(x, y, out=None, name=None)
-该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑与运算。
+该OP逐元素地对 ``x`` 和 ``y`` 进行逻辑与运算。
.. math::
Out = X \&\& Y
-参数:
- - **x** (Variable)- 逻辑与运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **y** (Variable)- 逻辑与运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
- - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。
-
-返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。
+.. note::
+ ``paddle.logical_and`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。
-返回类型:Variable
+参数:
+ - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。
+ - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。
+返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。
**代码示例:**
.. code-block:: python
- import paddle.fluid as fluid
- import numpy as np
-
- # Graph organizing
- x = fluid.layers.data(name='x', shape=[2], dtype='bool')
- y = fluid.layers.data(name='y', shape=[2], dtype='bool')
- res = fluid.layers.logical_and(x=x, y=y)
- # The comment lists another available method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
- # fluid.layers.logical_and(x=x, y=y, out=res)
-
- # Create an executor using CPU as an example
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
-
- # Execute
- x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
- y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
- res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
- print(res_val) # [[True, False], [False, False]]
+ import paddle
+ import numpy as np
+ paddle.disable_static()
+ x_data = np.array([True], dtype=np.bool)
+ y_data = np.array([True, False, True, False], dtype=np.bool)
+ x = paddle.to_tensor(x_data)
+ y = paddle.to_tensor(y_data)
+ res = paddle.logical_and(x, y)
+ print(res.numpy()) # [True False True False]
diff --git a/doc/fluid/api_cn/layers_cn/logical_not_cn.rst b/doc/fluid/api_cn/layers_cn/logical_not_cn.rst
index 40747ab04482ad36a95e3ed15dfc81434a3108a5..3eaf0f1719abac0ce3e63ed2867026f349e76fba 100644
--- a/doc/fluid/api_cn/layers_cn/logical_not_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/logical_not_cn.rst
@@ -3,19 +3,25 @@
logical_not
-------------------------------
-.. py:function:: paddle.fluid.layers.logical_not(x, out=None, name=None)
+.. py:function:: paddle.logical_not(x, out=None, name=None)
-该OP逐元素的对 ``X`` LoDTensor/Tensor进行逻辑非运算
+:alias_main: paddle.logical_not
+:alias: paddle.logical_not, paddle.tensor.logical_not, paddle.tensor.logic.logical_not
+:old_api: paddle.fluid.layers.logical_not
+
+
+
+该OP逐元素地对 ``X`` Variable 进行逻辑非运算。
.. math::
Out = !X
参数:
- - **x** (Variable)- 逻辑非运算的输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
+ - **x** (Variable)- 逻辑非运算的输入,是一个 Variable,数据类型只能是bool。
+ - **out** (Variable,可选)- 指定算子输出结果的 Variable,可以是程序中已经创建的任何 Variable。默认值为None,此时将创建新的Variable来保存输出结果。
- **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。
-返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。
+返回:与 ``x`` 维度相同,数据类型相同的 Variable。
返回类型:Variable
@@ -23,22 +29,11 @@ logical_not
.. code-block:: python
- import paddle.fluid as fluid
+ import paddle
import numpy as np
- # Graph organizing
- x = fluid.layers.data(name='x', shape=[2], dtype='bool')
- res = fluid.layers.logical_not(x)
- # The comment lists another availble method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
- # fluid.layers.logical_not(x, out=res)
-
- # Create an executor using CPU as an example
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
-
- # Execute
- x_i = np.array([[1, 0]]).astype(np.bool)
- res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
- print(res_val) # [[False, True]]
-
+ paddle.enable_imperative()
+ x_data = np.array([True, False, True, False], dtype=np.bool)
+ x = paddle.imperative.to_variable(x_data)
+ res = paddle.logical_not(x)
+ print(res.numpy()) # [False True False True]
diff --git a/doc/fluid/api_cn/layers_cn/logical_or_cn.rst b/doc/fluid/api_cn/layers_cn/logical_or_cn.rst
index dac0d971074c323f922db9de159fdd1fe7b0b166..9cb3420ea323948e47209be35dada5a85d2d51ea 100644
--- a/doc/fluid/api_cn/layers_cn/logical_or_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/logical_or_cn.rst
@@ -3,46 +3,35 @@
logical_or
-------------------------------
-.. py:function:: paddle.fluid.layers.logical_or(x, y, out=None, name=None)
+.. py:function:: paddle.logical_or(x, y, out=None, name=None)
-该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑或运算。
+该OP逐元素地对 ``X`` 和 ``Y`` 进行逻辑或运算。
.. math::
Out = X || Y
-参数:
- - **x** (Variable)- 逻辑或运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **y** (Variable)- 逻辑或运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
- - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。
-
-返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。
+.. note::
+ ``paddle.logical_or`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。
-返回类型:Variable
+参数:
+ - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。
+ - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。
+返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。
**代码示例:**
.. code-block:: python
- import paddle.fluid as fluid
- import numpy as np
-
- # Graph organizing
- x = fluid.layers.data(name='x', shape=[2], dtype='bool')
- y = fluid.layers.data(name='y', shape=[2], dtype='bool')
- res = fluid.layers.logical_or(x=x, y=y)
- # The comment lists another available method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
- # fluid.layers.logical_or(x=x, y=y, out=res)
-
- # Create an executor using CPU as an example
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
-
- # Execute
- x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
- y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
- res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
- print(res_val) # [[True, True], [False, True]]
+ import paddle
+ import numpy as np
+ paddle.disable_static()
+ x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
+ y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
+ x = paddle.to_tensor(x_data)
+ y = paddle.to_tensor(y_data)
+ res = paddle.logical_or(x, y)
+ print(res.numpy()) # [[ True True] [ True False]]
diff --git a/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst b/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst
index 2482c32a5b1e4dad4d49b95f8665b9fbbf058128..502a5a60e55ea3384cc5a2b579118085bccb9e1f 100644
--- a/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/logical_xor_cn.rst
@@ -3,47 +3,35 @@
logical_xor
-------------------------------
-.. py:function:: paddle.fluid.layers.logical_xor(x, y, out=None, name=None)
+.. py:function:: paddle.logical_xor(x, y, out=None, name=None)
-该OP逐元素的对 ``X`` 和 ``Y`` 两LoDTensor/Tensor进行逻辑异或运算。
+该OP逐元素地对 ``X`` 和 ``Y`` 进行逻辑异或运算。
.. math::
Out = (X || Y) \&\& !(X \&\& Y)
-参数:
- - **x** (Variable)- 逻辑异或运算的第一个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **y** (Variable)- 逻辑异或运算的第二个输入,是一个多维的LoDTensor/Tensor,数据类型只能是bool。
- - **out** (Variable,可选)- 指定算子输出结果的LoDTensor/Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。
- - **name** (str,可选)- 该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_Name` ,默认值为None。
-
+.. note::
+ ``paddle.logical_xor`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。
-返回:与 ``x`` 维度相同,数据类型相同的LoDTensor/Tensor。
-
-返回类型:Variable
+参数:
+ - **x** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **y** (Tensor)- 输入的 `Tensor` ,数据类型为:bool。
+ - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor` ,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。
+ - **name** (str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。
+返回: ``Tensor`` ,维度与 ``x`` 相同,存储运算后的结果。
**代码示例:**
.. code-block:: python
- import paddle.fluid as fluid
- import numpy as np
-
- # Graph organizing
- x = fluid.layers.data(name='x', shape=[2], dtype='bool')
- y = fluid.layers.data(name='y', shape=[2], dtype='bool')
- res = fluid.layers.logical_xor(x=x, y=y)
- # The comment lists another available method.
- # res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
- # fluid.layers.logical_xor(x=x, y=y, out=res)
-
- # Create an executor using CPU as an example
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
-
- # Execute
- x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
- y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
- res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
- print(res_val) # [[False, True], [False, True]]
+ import paddle
+ import numpy as np
+ paddle.disable_static()
+ x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
+ y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
+ x = paddle.to_tensor(x_data)
+ y = paddle.to_tensor(y_data)
+ res = paddle.logical_xor(x, y)
+ print(res.numpy()) # [[False, True], [ True, False]]
diff --git a/doc/fluid/api_cn/layers_cn/lrn_cn.rst b/doc/fluid/api_cn/layers_cn/lrn_cn.rst
index df97c8b979a98df4d1306839ea149ad1d9dbbe79..0465849f1c66e0bb8ce6bbdf3c987e45de01bffb 100644
--- a/doc/fluid/api_cn/layers_cn/lrn_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/lrn_cn.rst
@@ -5,6 +5,12 @@ lrn
.. py:function:: paddle.fluid.layers.lrn(input, n=5, k=1.0, alpha=0.0001, beta=0.75, name=None, data_format='NCHW')
+:alias_main: paddle.nn.functional.lrn
+:alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
+:old_api: paddle.fluid.layers.lrn
+
+
+
该OP实现了局部响应正则化层(Local Response Normalization Layer),用于对局部输入区域正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考: `ImageNet Classification with Deep Convolutional Neural Networks `_
diff --git a/doc/fluid/api_cn/layers_cn/lstm_cn.rst b/doc/fluid/api_cn/layers_cn/lstm_cn.rst
index 206d8227ff84ee97ad1c6f914b4f71e69dca2e68..f42882efaf70c2827f07bc1bbd4f14f0e4710589 100644
--- a/doc/fluid/api_cn/layers_cn/lstm_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/lstm_cn.rst
@@ -3,14 +3,17 @@
lstm
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.lstm(input, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=0.0, is_bidirec=False, is_test=False, name=None, default_initializer=None, seed=-1)
+:api_attr: 声明式编程模式(静态图)
+
+
+
.. note::
该OP仅支持 GPU 设备运行
-该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。
+该OP实现了 LSTM,即 Long-Short Term Memory(长短期记忆)运算 - `Hochreiter, S., & Schmidhuber, J. (1997) `_。
该OP的实现不包括 diagonal/peephole 连接,参见 `Gers, F. A., & Schmidhuber, J. (2000) `_。
如果需要使用 peephole 连接方法,请使用 :ref:`cn_api_fluid_layers_dynamic_lstm` 。
@@ -57,7 +60,7 @@ lstm
返回: 经过lstm运算输出的三个Tensor的tuple,包括
-- rnn_out:LSTM hidden的输出结果的Tensor,数据类型与input一致,维度为 :math:`[seq\_len, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[seq\_len, batch\_size, hidden\_size*2]`
+- rnn_out:LSTM hidden的输出结果的Tensor,数据类型与input一致,维度为 :math:`[batch\_size, seq\_len, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[batch\_size, seq\_len, hidden\_size*2]`
- last_h:LSTM最后一步的hidden状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]`
- last_c:LSTM最后一步的cell状态的Tensor,数据类型与input一致,维度为 :math:`[num\_layers, batch\_size, hidden\_size]` 。如果 ``is_bidirec`` 设置为True,则维度为 :math:`[num\_layers*2, batch\_size, hidden\_size]`
@@ -73,12 +76,11 @@ lstm
emb_dim = 256
vocab_size = 10000
data = fluid.layers.data(name='x', shape=[-1, 100, 1],
- dtype='int32')
+ dtype='int64')
emb = fluid.layers.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True)
batch_size = 20
max_len = 100
dropout_prob = 0.2
- seq_len = 100
hidden_size = 150
num_layers = 1
init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
@@ -87,7 +89,7 @@ lstm
rnn_out, last_h, last_c = layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob)
rnn_out.shape # (-1, 100, 150)
last_h.shape # (1, 20, 150)
- layt_c.shape # (1, 20, 150)
+ last_c.shape # (1, 20, 150)
diff --git a/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst b/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst
index e24a932b38afae0e9bb6e24cf0afa5ddd5afff22..7e33fb3b748456950cbc55ac3fef511965bd82cb 100644
--- a/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/lstm_unit_cn.rst
@@ -3,10 +3,13 @@
lstm_unit
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.lstm_unit(x_t, hidden_t_prev, cell_t_prev, forget_bias=0.0, param_attr=None, bias_attr=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
Long-Short Term Memory(LSTM)循环神经网络计算单元。该OP用于完成单个时间步内LSTM的计算,基于论文 `RECURRENT NEURAL NETWORK REGULARIZATION `_ 中的描述实现,
@@ -48,26 +51,18 @@ Long-Short Term Memory(LSTM)循环神经网络计算单元。该OP用于完
**代码示例**:
.. code-block:: python
-
+
import paddle.fluid as fluid
-
+
dict_dim, emb_dim, hidden_dim = 128, 64, 512
- data = fluid.layers.data(name='step_data', shape=[1], dtype='int32')
- x = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
- pre_hidden = fluid.layers.data(name='pre_hidden', shape=[hidden_dim], dtype='float32')
- pre_cell = fluid.layers.data(name='pre_cell', shape=[hidden_dim], dtype='float32')
+ data = fluid.data(name='step_data', shape=[None], dtype='int64')
+ x = fluid.embedding(input=data, size=[dict_dim, emb_dim])
+ pre_hidden = fluid.data(
+ name='pre_hidden', shape=[None, hidden_dim], dtype='float32')
+ pre_cell = fluid.data(
+ name='pre_cell', shape=[None, hidden_dim], dtype='float32')
hidden = fluid.layers.lstm_unit(
x_t=x,
hidden_t_prev=pre_hidden,
cell_t_prev=pre_cell)
-
-
-
-
-
-
-
-
-
-
-
+
diff --git a/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst b/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst
index 8837b7293818083480e875d86993c49d944d167b..0412f85fc6f91c9534d485ab7ac7584d74e80251 100644
--- a/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/margin_rank_loss_cn.rst
@@ -5,6 +5,12 @@ margin_rank_loss
.. py:function:: paddle.fluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None)
+:alias_main: paddle.nn.functional.margin_rank_loss
+:alias: paddle.nn.functional.margin_rank_loss,paddle.nn.functional.loss.margin_rank_loss
+:old_api: paddle.fluid.layers.margin_rank_loss
+
+
+
margin rank loss(间隔排序损失)层。在排序问题中,它可以比较来自排序网络的输入 ``left`` 和输入 ``right`` 的得分。
可用如下等式定义:
diff --git a/doc/fluid/api_cn/layers_cn/matmul_cn.rst b/doc/fluid/api_cn/layers_cn/matmul_cn.rst
index 27f71423f2e831034b4a3c23745b1e5132554294..8514a410c290fe9de6004751329d2439772bcd99 100644
--- a/doc/fluid/api_cn/layers_cn/matmul_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/matmul_cn.rst
@@ -5,6 +5,9 @@ matmul
.. py:function:: paddle.fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
+
+
+
输入 ``x`` 和输入 ``y`` 矩阵相乘。
两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。
diff --git a/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst b/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f0c8af1e6cb09f7137d40bd4899395a3cb6e4e41
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/matrix_nms_cn.rst
@@ -0,0 +1,59 @@
+.. _cn_api_fluid_layers_matrix_nms:
+
+matrix_nms
+-------------------------------
+
+
+.. py:function:: paddle.fluid.layers.matrix_nms(bboxes, scores, score_threshold, post_threshold, nms_top_k, keep_top_k, use_gaussian=False, gaussian_sigma=2., background_label=0, normalized=True, return_index=False, name=None)
+
+:alias_main: paddle.nn.functional.matrix_nms
+:alias: paddle.nn.functional.matrix_nms,paddle.nn.functional.extension.matrix_nms
+:old_api: paddle.fluid.layers.matrix_nms
+
+
+
+**Matrix NMS**
+
+该OP使用Matrix NMS算法对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。
+
+如果提供 ``score_threshold`` 阈值且 ``nms_top_k`` 大于-1,则选择置信度分数最大的k个框。 然后按照Matrix NMS算法对分数进行衰减。经过抑制后,如果 ``keep_top_k`` 大于-1, 则每张图片最终保留 ``keep_top_k`` 个检测框。
+
+在NMS步骤后,如果keep_top_k大于-1,则每个图像最多保留keep_top_k个框(bounding box)。
+
+
+参数:
+ - **bboxes** (Variable) - 形为[N,M,4]的3-D张量,表示将预测M个边界框的预测位置, N是批大小(batch size)。当边界框(bounding box)大小等于4时,每个边界框有四个坐标值,布局为[xmin,ymin,xmax,ymax]。数据类型为float32或float64。
+ - **scores** (Variable) – 形为[N,C,M]的3-D张量,表示预测的置信度。 N是批大小(batch size),C是种类数目,M是边界框bounding box的数量。对于每个类别,存在对应于M个边界框的总M个分数。请注意,M等于bboxes的第二维。数据类型为float32或float64。
+ - **score_threshold** (float) – 过滤掉低置信度分数的边界框的阈值。
+ - **post_threshold** (float) – 经过NMS衰减后,过滤掉低置信度分数的边界框的阈值。
+ - **nms_top_k** (int) – 基于 score_threshold 的过滤检测后,根据置信度保留的最大检测次数。
+ - **keep_top_k** (int) – 经过NMS抑制后,最终保留的最大检测次数。如果设置为 -1 ,则保留全部。
+ - **use_gaussian** (bool) – 是否使用高斯函数衰减。默认值:False 。
+ - **gaussian_sigma** (float) – 高斯函数的Sigma值,默认值:2.0 。
+ - **background_label** (int) – 背景标签(类别)的索引,如果设置为 0 ,则忽略背景标签(类别)。如果设置为 -1 ,则考虑所有类别。默认值:0
+ - **normalized** (bool) – 检测是否已经经过正则化。默认值:True 。
+ - **return_index** (bool) – 是否同时返回保留检测框的序号。默认值:False 。
+ - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。
+
+返回:
+ - **Out** (Variable) - 形为[No,6]的2-D LoDTensor,表示检测结果。每行有6个值:[标签label,置信度confidence,xmin,ymin,xmax,ymax]。或形为[No,10]的2-D LoDTensor,用来表示检测结果。 每行有10个值:[标签label,置信度confidence,x1,y1,x2,y2,x3,y3,x4,y4]。 No是检测的总数。 如果对所有图像都没有检测到的box,则lod将设置为{1},而Out仅包含一个值-1。 (1.3版本之后,当未检测到box时,lod从{0}更改为{1})
+ - **Index** (Variable) - 形为[No,1]的2-D LoDTensor,表示检测结果在整个批次中的序号。
+
+
+**代码示例**
+
+.. code-block:: python
+
+ import paddle.fluid as fluid
+ boxes = fluid.data(name='bboxes', shape=[None,81, 4],
+ dtype='float32', lod_level=1)
+ scores = fluid.data(name='scores', shape=[None,81],
+ dtype='float32', lod_level=1)
+ out = fluid.layers.matrix_nms(bboxes=boxes,
+ scores=scores,
+ background_label=0,
+ score_threshold=0.5,
+ post_threshold=0.1,
+ nms_top_k=400,
+ keep_top_k=200,
+ normalized=False)
diff --git a/doc/fluid/api_cn/layers_cn/maxout_cn.rst b/doc/fluid/api_cn/layers_cn/maxout_cn.rst
index af0a58cf72d01f0dc3576bce9e6488310ae174be..2f73289029229de4436d34bb99030a5eb958d1fb 100644
--- a/doc/fluid/api_cn/layers_cn/maxout_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/maxout_cn.rst
@@ -5,6 +5,12 @@ maxout
.. py:function:: paddle.fluid.layers.maxout(x, groups, name=None, axis=1)
+:alias_main: paddle.nn.functional.maxout
+:alias: paddle.nn.functional.maxout,paddle.nn.functional.activation.maxout
+:old_api: paddle.fluid.layers.maxout
+
+
+
假设输入形状为(N, Ci, H, W),输出形状为(N, Co, H, W),则 :math:`Co=Ci/groups` 运算公式如下:
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/mean_cn.rst b/doc/fluid/api_cn/layers_cn/mean_cn.rst
index 738a19d683524a622881f4f4da067b71a2ed0b83..69363e1c6ba3934a5ea1f5622d5233552b47fc84 100644
--- a/doc/fluid/api_cn/layers_cn/mean_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/mean_cn.rst
@@ -5,6 +5,12 @@ mean
.. py:function:: paddle.fluid.layers.mean(x, name=None)
+:alias_main: paddle.mean
+:alias: paddle.mean,paddle.tensor.mean,paddle.tensor.stat.mean
+:old_api: paddle.fluid.layers.mean
+
+
+
计算 ``x`` 所有元素的平均值。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst b/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst
index 4525b968f021a96eecb3aaa1552dc2d3e1b251f7..254c9d9e5a4200741675d7c926085526bda5862f 100644
--- a/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/mean_iou_cn.rst
@@ -5,6 +5,9 @@ mean_iou
.. py:function:: paddle.fluid.layers.mean_iou(input, label, num_classes)
+
+
+
该OP计算均值IOU, 均值IOU(Mean Intersection-Over-Union)是语义图像分割中的常用的评价指标之一,它首先计算每个类的IOU,然后计算类之间的平均值。IOU定义如下:
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst b/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst
index 9e0dc9cdb719a8159f5278cfe002f6a05d1bc6a7..d44783ca46c84322e25a4d16a767663ccad95d25 100644
--- a/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/merge_selected_rows_cn.rst
@@ -5,6 +5,9 @@ merge_selected_rows
.. py:function:: paddle.fluid.layers.merge_selected_rows(x, name=None)
+
+
+
累加合并 `SelectedRows `_ ( ``x`` ) 中的重复行,并对行值由小到大重新排序。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst b/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst
index ff8f9972bf53ba5db5f3ade1e1022c04e5fc1719..59678570f9e91da8e2ad46212955ea0802f8a462 100644
--- a/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/mse_loss_cn.rst
@@ -5,6 +5,12 @@ mse_loss
.. py:function:: paddle.fluid.layers.mse_loss(input,label)
+:alias_main: paddle.nn.functional.mse_loss
+:alias: paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss
+:old_api: paddle.fluid.layers.mse_loss
+
+
+
该OP用于计算预测值和目标值的均方差误差。
对于预测值input和目标值label,公式为:
diff --git a/doc/fluid/api_cn/layers_cn/mul_cn.rst b/doc/fluid/api_cn/layers_cn/mul_cn.rst
index 03993fc7ee30d38bf62ce631949fa188715acf5d..4ce54f1a02584347177f65889ec546d5352cff50 100644
--- a/doc/fluid/api_cn/layers_cn/mul_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/mul_cn.rst
@@ -5,6 +5,9 @@ mul
.. py:function:: paddle.fluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None)
+
+
+
mul算子
此运算是用于对输入x和y执行矩阵乘法。
公式是:
diff --git a/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst b/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst
index 08c7830002290a9b5d02c60dae0e67aa37ae090d..a8fb7337f826d00f919a382c0377acbba01b1f5c 100644
--- a/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/multi_box_head_cn.rst
@@ -3,10 +3,13 @@
multi_box_head
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.multi_box_head(inputs, image, base_size, num_classes, aspect_ratios, min_ratio=None, max_ratio=None, min_sizes=None, max_sizes=None, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.2, 0.2], flip=True, clip=False, kernel_size=1, pad=0, stride=1, name=None, min_max_aspect_ratios_order=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
基于SSD(Single Shot MultiBox Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅SSD论文 `SSD:Single Shot MultiBox Detector `_ 的2.2节。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst b/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst
index 7dc576160b68917901ddbc71649fc94eb9fb118f..12e3c804d95d81742116abfbf8bc71090621db1c 100644
--- a/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/multiclass_nms_cn.rst
@@ -5,6 +5,12 @@ multiclass_nms
.. py:function:: paddle.fluid.layers.multiclass_nms(bboxes, scores, score_threshold, nms_top_k, keep_top_k, nms_threshold=0.3, normalized=True, nms_eta=1.0, background_label=0, name=None)
+:alias_main: paddle.nn.functional.multiclass_nms
+:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
+:old_api: paddle.fluid.layers.multiclass_nms
+
+
+
**多分类NMS**
该OP用于对边界框(bounding box)和评分(scores)执行多类非极大值抑制(NMS)。
diff --git a/doc/fluid/api_cn/layers_cn/multiplex_cn.rst b/doc/fluid/api_cn/layers_cn/multiplex_cn.rst
index 03c3d091360bfbf3b606b02f4b5c1041124b6b1d..1cc5bf39c256f14165cbf87df1a15e02cee3a070 100644
--- a/doc/fluid/api_cn/layers_cn/multiplex_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/multiplex_cn.rst
@@ -5,6 +5,12 @@ multiplex
.. py:function:: paddle.fluid.layers.multiplex(inputs, index)
+:alias_main: paddle.multiplex
+:alias: paddle.multiplex,paddle.tensor.multiplex,paddle.tensor.math.multiplex
+:old_api: paddle.fluid.layers.multiplex
+
+
+
根据给定的index参数,该OP从每个输入Tensor中选择特定行构造输出Tensor。
设该OP输入包含 :math:`m` 个Tensor,其中 :math:`I_{i}` 代表第i个输入Tensor,:math:`i` 处于区间 :math:`[0,m)`。
diff --git a/doc/fluid/api_cn/layers_cn/multiply_cn.rst b/doc/fluid/api_cn/layers_cn/multiply_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4bb5ee012326ad9ae7ded391a124312f5654b769
--- /dev/null
+++ b/doc/fluid/api_cn/layers_cn/multiply_cn.rst
@@ -0,0 +1,79 @@
+.. _cn_api_fluid_layers_multiply:
+
+multiply
+-------------------------------
+
+.. py:function:: paddle.multiply(x, y, axis=-1, name=None)
+
+:alias_main: paddle.multiply
+:alias: paddle.multiply, paddle.tensor.multiply, paddle.tensor.math.multiply
+
+
+
+该OP是逐元素相乘算子,输入 ``x`` 与输入 ``y`` 逐元素相乘,并将各个位置的输出元素保存到返回结果中。
+
+等式是:
+
+.. math::
+ Out = X \odot Y
+
+- :math:`X` :多维Tensor。
+- :math:`Y` :维度必须小于等于X维度的Tensor。
+
+对于这个运算算子有2种情况:
+ 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。
+ 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。
+
+对于情况2:
+ 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。
+ 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。
+ 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。
+
+例如:
+
+.. code-block:: text
+
+ shape(X) = (2, 3, 4, 5), shape(Y) = (,)
+ shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
+ shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
+ shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
+ shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
+ shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
+
+参数:
+ - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。
+ - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。
+ - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。
+ - **name** (string,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。
+
+
+返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。
+
+返回类型: Variable。
+
+**代码示例 1**
+
+.. code-block:: python
+
+ import paddle
+ import numpy as np
+ paddle.enable_imperative()
+ x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
+ y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
+ x = paddle.imperative.to_variable(x_data)
+ y = paddle.imperative.to_variable(y_data)
+ res = paddle.multiply(x, y)
+ print(res.numpy()) # [[5, 12], [21, 32]]
+ x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
+ y_data = np.array([1, 2], dtype=np.float32)
+ x = paddle.imperative.to_variable(x_data)
+ y = paddle.imperative.to_variable(y_data)
+ res = paddle.multiply(x, y, axis=1)
+ print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]
+
+
+
+
+
+
+
diff --git a/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst b/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst
index fbc2ca21ee5e9459d17d7e3d237bad2bc32193ec..8fc60de95cc71c793c0d3b825bf63817654fbacc 100644
--- a/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/natural_exp_decay_cn.rst
@@ -5,6 +5,12 @@ natural_exp_decay
.. py:function:: paddle.fluid.layers.natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False)
+:alias_main: paddle.nn.functional.natural_exp_decay
+:alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
+:old_api: paddle.fluid.layers.natural_exp_decay
+
+
+
将自然指数衰减运用到初始学习率上。
训练模型时,在训练过程中降低学习率。 自然指数衰减使用自然指数来计算衰减倍率,每 ``decay_steps`` 步衰减倍率的自然指数幂次项上增加 ``decay_rate`` 。
diff --git a/doc/fluid/api_cn/layers_cn/nce_cn.rst b/doc/fluid/api_cn/layers_cn/nce_cn.rst
index 20b66474c0e1fee0276947b4fdb64e6fcd237cda..ef4532b84619f8444cd7a01c6821f7eb26c2739b 100644
--- a/doc/fluid/api_cn/layers_cn/nce_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/nce_cn.rst
@@ -3,10 +3,13 @@
nce
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None, name=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False)
+:api_attr: 声明式编程模式(静态图)
+
+
+
计算并返回噪音对比估计损失值( noise-contrastive estimation training loss)。
请参考 `Noise-contrastive estimation: A new estimation principle for unnormalized statistical models
`_
@@ -40,35 +43,35 @@ nce
window_size = 5
words = []
- for i in xrange(window_size):
- words.append(fluid.layers.data(
- name='word_{0}'.format(i), shape=[1], dtype='int64'))
+ for i in range(window_size):
+ words.append(fluid.data(
+ name='word_{0}'.format(i), shape=[-1, 1], dtype='int64'))
dict_size = 10000
label_word = int(window_size / 2) + 1
embs = []
- for i in xrange(window_size):
+ for i in range(window_size):
if i == label_word:
continue
emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32],
- param_attr='embed', is_sparse=True)
+ param_attr='embed', is_sparse=True)
embs.append(emb)
embs = fluid.layers.concat(input=embs, axis=1)
loss = fluid.layers.nce(input=embs, label=words[label_word],
- num_total_classes=dict_size, param_attr='nce.w_0',
- bias_attr='nce.b_0')
+ num_total_classes=dict_size, param_attr='nce.w_0',
+ bias_attr='nce.b_0')
- # 或使用自定义分布
+ #or use custom distribution
dist = np.array([0.05,0.5,0.1,0.3,0.05])
loss = fluid.layers.nce(input=embs, label=words[label_word],
- num_total_classes=5, param_attr='nce.w_1',
- bias_attr='nce.b_1',
- num_neg_samples=3,
- sampler="custom_dist",
- custom_dist=dist)
+ num_total_classes=5, param_attr='nce.w_1',
+ bias_attr='nce.b_1',
+ num_neg_samples=3,
+ sampler="custom_dist",
+ custom_dist=dist)
diff --git a/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst b/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst
index 4769b6dd7192b523fad7528a6bc0fe30773a2991..00a7ad2321b6864891b8cd1c8d04a32b39c30b5e 100644
--- a/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/noam_decay_cn.rst
@@ -3,7 +3,13 @@
noam_decay
-------------------------------
-.. py:function:: paddle.fluid.layers.noam_decay(d_model,warmup_steps)
+.. py:function:: paddle.fluid.layers.noam_decay(d_model, warmup_steps, learning_rate=1.0)
+
+:alias_main: paddle.nn.functional.noam_decay
+:alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
+:old_api: paddle.fluid.layers.noam_decay
+
+
Noam衰减方法
@@ -14,11 +20,12 @@ noam衰减的numpy实现如下:
import paddle.fluid as fluid
import numpy as np
# 设置超参数
+ base_lr = 0.01
d_model = 2
current_steps = 20
warmup_steps = 200
# 计算
- lr_value = np.power(d_model, -0.5) * np.min([
+ lr_value = base_lr * np.power(d_model, -0.5) * np.min([
np.power(current_steps, -0.5),
np.power(warmup_steps, -1.5) * current_steps])
@@ -27,6 +34,7 @@ noam衰减的numpy实现如下:
参数:
- **d_model** (Variable|int) - 模型的输入、输出向量特征维度。类型可设置为标量Tensor,或int值。
- **warmup_steps** (Variable|int) - 预热步数,类型可设置为标量Tensor,或int值。
+ - **learning_rate** (Variable|float|int,可选) - 初始学习率。如果类型为Variable,则为shape为[1]的Tensor,数据类型为float32或float64;也可以是python的int类型。默认值为1.0。
返回:衰减的学习率
@@ -41,7 +49,8 @@ noam衰减的numpy实现如下:
learning_rate = 0.01
lr = fluid.layers.learning_rate_scheduler.noam_decay(
1/(warmup_steps *(learning_rate ** 2)),
- warmup_steps)
+ warmup_steps,
+ learning_rate)
diff --git a/doc/fluid/api_cn/layers_cn/not_equal_cn.rst b/doc/fluid/api_cn/layers_cn/not_equal_cn.rst
index f9e59aa65429b2730dd6b8e7cf8beb9e8f99f9a1..5a9cedf60cc03d67d8571424cffc2af62a583184 100644
--- a/doc/fluid/api_cn/layers_cn/not_equal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/not_equal_cn.rst
@@ -3,7 +3,13 @@
not_equal
-------------------------------
-.. py:function:: paddle.fluid.layers.not_equal(x, y, cond=None)
+.. py:function:: paddle.fluid.layers.not_equal(x, y, cond=None, name=None)
+
+:alias_main: paddle.not_equal
+:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
+:old_api: paddle.fluid.layers.not_equal
+
+
该OP逐元素地返回 :math:`x != y` 的逻辑值,使用重载算子 `!=` 可以有相同的计算函数效果。
@@ -11,8 +17,7 @@ not_equal
- **x** (Variable) – 进行比较的第一个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **y** (Variable) – 进行比较的第二个输入,是一个多维的Tensor,数据类型可以是float32,float64,int32,int64。
- **cond** (Variable,可选) – 如果为None,则创建一个Tensor来作为进行比较的输出结果,该Tensor的shape和数据类型和输入x一致;如果不为None,则将Tensor作为该OP的输出,数据类型和数据shape需要和输入x一致。默认值为None。
- - **force_cpu** (bool,可选) – 是否强制将输出Tensor存储在CPU。默认值为None,表示将输出Tensor存储在CPU内存上;如果为False,则将输出Tensor存储在运行设备内存上。
-
+ - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
返回:输出结果的Tensor,数据的shape和输入x一致。
返回类型:变量(Variable),数据类型为bool类型。
@@ -24,13 +29,12 @@ not_equal
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
- label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
- limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
- out_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
- out = fluid.layers.not_equal(x=label, y=limit) # out=[False, True]
- out1 = fluid.layers.not_equal(x=label, y=limit, cond=out_cond) #out1=[False, True] out_cond=[False, True]
- out2 = fluid.layers.not_equal(x=label, y=limit, force_cpu=False) #out2=[False, True]
- out3 = label != limit #out3=[False, True]
+ label = layers.assign(np.array([2, 3], dtype='int32'))
+ limit = layers.assign(np.array([3, 2], dtype='int32'))
+ out = fluid.layers.not_equal(x=label, y=limit) #out=[True, True]
+ out1 = label != limit #out1=[True, True]
+
+
diff --git a/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst b/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst
index 7b8eb851431df2bd282b72774b52879f0acee473..2b6c681656b6c7d452fcdf1999d52dd2c2cdcad1 100644
--- a/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/npair_loss_cn.rst
@@ -5,6 +5,12 @@ npair_loss
.. py:function:: paddle.fluid.layers.npair_loss(anchor, positive, labels, l2_reg=0.002)
+:alias_main: paddle.nn.functional.npair_loss
+:alias: paddle.nn.functional.npair_loss,paddle.nn.functional.loss.npair_loss
+:old_api: paddle.fluid.layers.npair_loss
+
+
+
**Npair Loss Layer**
参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_
diff --git a/doc/fluid/api_cn/layers_cn/one_hot_cn.rst b/doc/fluid/api_cn/layers_cn/one_hot_cn.rst
index d8f66f65e0838418089e18f77648021c635dc539..2aaaf117e6fc9dc5ab26032db91bef286f14e567 100644
--- a/doc/fluid/api_cn/layers_cn/one_hot_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/one_hot_cn.rst
@@ -5,6 +5,9 @@ one_hot
.. py:function:: paddle.fluid.layers.one_hot(input, depth, allow_out_of_range=False)
+
+
+
**注意:此OP要求输入Tensor shape的最后一维必须为1。此OP将在未来的版本中被移除!推荐使用fluid.** :ref:`cn_api_fluid_one_hot` 。
该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。
diff --git a/doc/fluid/api_cn/layers_cn/ones_cn.rst b/doc/fluid/api_cn/layers_cn/ones_cn.rst
index 881ebd4546387be2a231166bd14bef7d2eb6c02f..1eb93c5e0a86886cf3d88e93a6cea0c6e8e23e92 100644
--- a/doc/fluid/api_cn/layers_cn/ones_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/ones_cn.rst
@@ -5,18 +5,18 @@ ones
.. py:function:: paddle.fluid.layers.ones(shape,dtype,force_cpu=False)
-**ones**
-
-该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor,该OP会将stop_gradient设置为True,即停止梯度更新。
+该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。
参数:
- - **shape** (tuple|list) - 输出Tensor的形状。
- - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为float16、float32、float64、int32或int64。
- - **force_cpu** (bool) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。
+ - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。
+ - **dtype** (np.dtype|str) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。
+ - **force_cpu** (bool, 可选) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。
返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。
-返回类型:Variable
+抛出异常:
+ - ``TypeError`` - 当 ``dtype`` 不是bool、float16、float32、float64、int32、int64或None时。
+ - ``TypeError`` - 当 ``shape`` 不是tuple、list或者Tensor时;或当 ``shape`` 为Tensor,而其数据类型不是int32或者int64时。
**代码示例**:
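
以下给出一个简要的示意用法(示意性示例,假设默认的静态图环境):

.. code-block:: python

    import paddle.fluid as fluid

    # 创建 shape 为 [2, 3]、元素全为 1 的 int64 Tensor
    data = fluid.layers.ones(shape=[2, 3], dtype='int64')  # [[1, 1, 1], [1, 1, 1]]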
diff --git a/doc/fluid/api_cn/layers_cn/ones_like_cn.rst b/doc/fluid/api_cn/layers_cn/ones_like_cn.rst
index 06082a02bae8d5b6e1595d3adafe5aea063dd110..5d1e6a89788690d771f0d1cb986e4bcf425e5968 100644
--- a/doc/fluid/api_cn/layers_cn/ones_like_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/ones_like_cn.rst
@@ -5,6 +5,9 @@ ones_like
.. py:function:: paddle.fluid.layers.ones_like(x, out=None)
+
+
+
ones_like
该功能创建一个形状与类型与x相似的张量,初始值为1。
diff --git a/doc/fluid/api_cn/layers_cn/pad2d_cn.rst b/doc/fluid/api_cn/layers_cn/pad2d_cn.rst
index 9f2cb0673b10e867239dc68763eb396a0318ebbd..08e937f50a7eb33bb21adadc2039984ff221cf31 100644
--- a/doc/fluid/api_cn/layers_cn/pad2d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pad2d_cn.rst
@@ -5,6 +5,12 @@ pad2d
.. py:function:: paddle.fluid.layers.pad2d(input, paddings=[0, 0, 0, 0], mode='constant', pad_value=0.0, data_format='NCHW', name=None)
+:alias_main: paddle.nn.functional.pad2d
+:alias: paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d
+:old_api: paddle.fluid.layers.pad2d
+
+
+
该OP依照 paddings 和 mode 属性对input进行2维 ``pad`` 。
参数:
@@ -19,36 +25,34 @@ pad2d
返回类型:Variable
-示例:
+**示例**:
.. code-block:: text
- 假设X是输入图像:
+ Input = [[[[1., 2., 3.],
+ [4., 5., 6.]]]]
- X = [[1, 2, 3],
- [4, 5, 6]]
+ Case 0:
+ paddings = [0, 1, 2, 3],
+ mode = 'constant'
+ pad_value = 0
+ Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
+ [0., 0., 4., 5., 6., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.]]]]
- Case 0:
- paddings = [0, 1, 2, 3],
- mode = 'constant'
- pad_value = 0
- Out = [[0, 0, 1, 2, 3, 0, 0, 0]
- [0, 0, 4, 5, 6, 0, 0, 0]
- [0, 0, 0, 0, 0, 0, 0, 0]]
+ Case 1:
+ paddings = [0, 1, 2, 1],
+ mode = 'reflect'
+ Out = [[[[3., 2., 1., 2., 3., 2.],
+ [6., 5., 4., 5., 6., 5.],
+ [3., 2., 1., 2., 3., 2.]]]]
- Case 1:
- paddings = [0, 1, 2, 1],
- mode = 'reflect'
- Out = [[3, 2, 1, 2, 3, 2]
- [6, 5, 4, 5, 6, 5]
- [3, 2, 1, 2, 3, 2]]
-
- Case 2:
- paddings = [0, 1, 2, 1],
- mode = 'edge'
- Out = [[1, 1, 1, 2, 3, 3]
- [4, 4, 4, 5, 6, 6]
- [4, 4, 4, 5, 6, 6]]
+ Case 2:
+ paddings = [0, 1, 2, 1],
+ mode = 'edge'
+ Out = [[[[1., 1., 1., 2., 3., 3.],
+ [4., 4., 4., 5., 6., 6.],
+ [4., 4., 4., 5., 6., 6.]]]]
@@ -56,8 +60,6 @@ pad2d
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32')
- result = fluid.layers.pad2d(input=data, paddings=[1,2,3,4], mode='reflect')
-
-
+ import paddle.fluid as fluid
+ data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
+ result = fluid.layers.pad2d(input=data, paddings=[0, 1, 2, 3], mode='reflect')
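Case 1 ('reflect') above can be cross-checked with plain NumPy, whose ``np.pad`` follows the same reflection rule; the sketch assumes only NumPy and the documented ``[top, bottom, left, right]`` padding order:

.. code-block:: python

    import numpy as np

    x = np.array([[1., 2., 3.],
                  [4., 5., 6.]])
    # paddings = [0, 1, 2, 1]  ->  ((top, bottom), (left, right))
    out = np.pad(x, ((0, 1), (2, 1)), mode='reflect')
    # [[3. 2. 1. 2. 3. 2.]
    #  [6. 5. 4. 5. 6. 5.]
    #  [3. 2. 1. 2. 3. 2.]]   # matches Case 1 above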
diff --git a/doc/fluid/api_cn/layers_cn/pad_cn.rst b/doc/fluid/api_cn/layers_cn/pad_cn.rst
index 04ff8cd0fc6b0e103dcabeeed99336e552ca5f9f..a9ed06ded042fe47826667b3d70acfe766181c21 100644
--- a/doc/fluid/api_cn/layers_cn/pad_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pad_cn.rst
@@ -5,26 +5,30 @@ pad
.. py:function:: paddle.fluid.layers.pad(x, paddings, pad_value=0.0, name=None)
+:alias_main: paddle.nn.functional.pad
+:alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad
+:old_api: paddle.fluid.layers.pad
+
+
+
该OP在Tensor上填充一个由 ``pad_value`` 给出的常数值,填充宽度由 ``paddings`` 指定。
其中,维度 ``i`` 中 ``x`` 内容前填充的值个数用 ``paddings[2*i]`` 表示,维度 ``i`` 中 ``x`` 内容后填充的值个数用 ``paddings[2*i+1]`` 表示。
-**样例**:
+**示例**:
-::
+.. code-block:: text
Given:
+ x = [[1, 2], [3, 4]]
- x = [[1, 2], [3, 4]]
-
- paddings = [0, 1, 1, 2]
+ paddings = [0, 1, 1, 2]
- pad_value = 0
+ pad_value = 0
Return:
-
- out = [[0, 1, 2, 0, 0]
- [0, 3, 4, 0, 0]
- [0, 0, 0, 0, 0]]
+ out = [[0, 1, 2, 0, 0]
+ [0, 3, 4, 0, 0]
+ [0, 0, 0, 0, 0]]
参数:
@@ -44,15 +48,7 @@ pad
# x 为一个秩为2的张量
import paddle.fluid as fluid
- x = fluid.layers.data(name='data', shape=[224], dtype='float32')
+ x = fluid.data(name='data', shape=[300, 300], dtype='float32')
out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
-
-
-
-
-
-
-
-
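The ``paddings[2*i]`` / ``paddings[2*i+1]`` convention described above can be reproduced with NumPy alone (illustration only, no Paddle API involved):

.. code-block:: python

    import numpy as np

    x = np.array([[1, 2], [3, 4]])
    # paddings = [0, 1, 1, 2]: dim 0 -> (0 before, 1 after), dim 1 -> (1 before, 2 after)
    out = np.pad(x, ((0, 1), (1, 2)), mode='constant', constant_values=0)
    # [[0 1 2 0 0]
    #  [0 3 4 0 0]
    #  [0 0 0 0 0]]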
diff --git a/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst b/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst
index 3172afab76e7bd0343978a7f999a97fd9c89009a..8f0bad53df1e4c53ef64baa921b1153404d98a5b 100644
--- a/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pad_constant_like_cn.rst
@@ -5,11 +5,17 @@ pad_constant_like
.. py:function:: paddle.fluid.layers.pad_constant_like(x, y, pad_value=0.0, name=None)
+:alias_main: paddle.nn.functional.pad_constant_like
+:alias: paddle.nn.functional.pad_constant_like,paddle.nn.functional.common.pad_constant_like
+:old_api: paddle.fluid.layers.pad_constant_like
+
+
+
该OP使用 ``pad_value`` 填充 ``y`` ,填充到每个维度值的数量由x和y的形状而指定,((0,x.shape[0] - y.shape[0]), ..., (0, x.shape[i] - y.shape[i]), ..., (0, x.shape[n] - y.shape[n]))是每个维度填充的宽度,对于维度i,填充宽度 ``(0, x.shape[i] - y.shape[i])`` ,表示在y的第i维开头不填充,而在末尾填充 ``x.shape[i] - y.shape[i]`` 个位置。该OP要求y与x具有相同的秩,并且对每个维度i, ``y.shape[i] <= x.shape[i]`` 。
-**样例**
+**示例**:
-::
+.. code-block:: text
Given:
X = [[[[ 0, 1, 2],
@@ -24,30 +30,34 @@ pad_constant_like
[27, 28, 29]],
[[30, 31, 32],
[33, 34, 35]]]]
+
X.shape = (2, 3, 2, 3)
Y = [[[[35, 36, 37]],
[[38, 39, 40]],
[[41, 42, 43]]]]
+
Y.shape = (1, 3, 1, 3)
- and
+ And
pad_value = 0.
- Output is:
- out = [[[[35, 36, 37],
- [0, 0, 0]],
+ Return:
+ Out = [[[[35, 36, 37],
+ [ 0, 0, 0]],
[[38, 39, 40],
- [0, 0, 0]],
+ [ 0, 0, 0]],
[[41, 42, 43],
- [0, 0, 0]]],
- [[[0, 0, 0],
- [0, 0, 0]],
- [[0, 0, 0],
- [0, 0, 0]],
- [[0, 0, 0],
- [0, 0, 0]]]]
- out.shape = [2, 3, 2, 3]
+ [ 0, 0, 0]]],
+ [[[ 0, 0, 0],
+ [ 0, 0, 0]],
+ [[ 0, 0, 0],
+ [ 0, 0, 0]],
+ [[ 0, 0, 0],
+ [ 0, 0, 0]]]]
+
+ Out.shape = [2, 3, 2, 3]
+
参数:
- **x** (Variable)- 多维Tensor
@@ -66,8 +76,8 @@ pad_constant_like
# x是秩为4的tensor, x.shape = (2, 3, 2, 3)
# y是秩为4的tensor, y.shape = (1, 3, 1, 3)
import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[2,3,2,3], dtype='float32')
- y = fluid.layers.data(name='y', shape=[1,3,1,3], dtype='float32')
+ x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
+ y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
# out是秩为4的tensor, out.shape = [2, 3 ,2 , 3]
diff --git a/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst b/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst
index 9aeea557f92951e63710679f21ea31ed0a20287f..ecb2cd6274244631a6390f22a3c09aded78fef1a 100644
--- a/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/piecewise_decay_cn.rst
@@ -5,6 +5,12 @@ piecewise_decay
.. py:function:: paddle.fluid.layers.piecewise_decay(boundaries,values)
+:alias_main: paddle.nn.functional.piecewise_decay
+:alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
+:old_api: paddle.fluid.layers.piecewise_decay
+
+
+
对初始学习率进行分段衰减。
该算法可用如下代码描述。
diff --git a/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst b/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst
index bd6b1cd77283f48943a5c46ff00d18c70309cac8..07b29dd8892aece514291f781a5e3918dc882834 100644
--- a/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pixel_shuffle_cn.rst
@@ -5,6 +5,12 @@ pixel_shuffle
.. py:function:: paddle.fluid.layers.pixel_shuffle(x, upscale_factor)
+:alias_main: paddle.nn.functional.pixel_shuffle
+:alias: paddle.nn.functional.pixel_shuffle,paddle.nn.functional.vision.pixel_shuffle
+:old_api: paddle.fluid.layers.pixel_shuffle
+
+
+
该OP将一个形为[N, C, H, W]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。
.. code-block:: text
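The [N, C, H, W] -> [N, C/r**2, H*r, W*r] rearrangement described above amounts to two reshapes and a transpose. The NumPy sketch below assumes the standard depth-to-space ordering from the referenced paper and only illustrates the layout change; it is not the operator's implementation:

.. code-block:: python

    import numpy as np

    def pixel_shuffle_ref(x, r):
        # x: [N, C, H, W], with C divisible by r*r
        n, c, h, w = x.shape
        x = x.reshape(n, c // (r * r), r, r, h, w)
        x = x.transpose(0, 1, 4, 2, 5, 3)               # interleave the r*r factors into H and W
        return x.reshape(n, c // (r * r), h * r, w * r)

    out = pixel_shuffle_ref(np.arange(2 * 9 * 4 * 4).reshape(2, 9, 4, 4), r=3)
    print(out.shape)  # (2, 1, 12, 12)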
diff --git a/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst b/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst
index 81621b34a0353275a60ae7888bb4ffcdcd91677d..06fa39efbdc758bc5e03dcf64e9554d2f627086b 100644
--- a/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/polygon_box_transform_cn.rst
@@ -5,6 +5,12 @@ polygon_box_transform
.. py:function:: paddle.fluid.layers.polygon_box_transform(input, name=None)
+:alias_main: paddle.nn.functional.polygon_box_transform
+:alias: paddle.nn.functional.polygon_box_transform,paddle.nn.functional.extension.polygon_box_transform
+:old_api: paddle.fluid.layers.polygon_box_transform
+
+
+
**PolygonBoxTransform 算子**
该op用于将偏移坐标改变为真实的坐标。
diff --git a/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst b/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst
index ec04e1964d320095ffe1946e1f5c83cdcd347fb2..e03443b0bd7d02177ef045393595f8d06dc4abf5 100644
--- a/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/polynomial_decay_cn.rst
@@ -5,6 +5,12 @@ polynomial_decay
.. py:function:: paddle.fluid.layers.polynomial_decay(learning_rate,decay_steps,end_learning_rate=0.0001,power=1.0,cycle=False)
+:alias_main: paddle.nn.functional.polynomial_decay
+:alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
+:old_api: paddle.fluid.layers.polynomial_decay
+
+
+
对初始学习率使用多项式衰减
.. code-block:: text
diff --git a/doc/fluid/api_cn/layers_cn/pool2d_cn.rst b/doc/fluid/api_cn/layers_cn/pool2d_cn.rst
index af74397a256a93162f1fec898bdf51c7a9a579a8..d1990be374c9807f6272937e7e6c6d75e3e89062 100644
--- a/doc/fluid/api_cn/layers_cn/pool2d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pool2d_cn.rst
@@ -5,6 +5,12 @@ pool2d
.. py:function:: paddle.fluid.layers.pool2d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCHW")
+:alias_main: paddle.nn.functional.pool2d
+:alias: paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d
+:old_api: paddle.fluid.layers.pool2d
+
+
+
该OP使用上述输入参数的池化配置,为二维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` ,填充 ``pool_padding`` 等参数得到输出。
输入 ``input`` 和输出(out)采用NCHW或NHWC格式,N为批大小,C是通道数,H是特征高度,W是特征宽度。
diff --git a/doc/fluid/api_cn/layers_cn/pool3d_cn.rst b/doc/fluid/api_cn/layers_cn/pool3d_cn.rst
index 47e314896d920d28782a0ce0f5f1564dfcf3dce0..d23d77b7247696b0b6e53b269a6d24b6f59e6b9d 100644
--- a/doc/fluid/api_cn/layers_cn/pool3d_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pool3d_cn.rst
@@ -5,6 +5,12 @@ pool3d
.. py:function:: paddle.fluid.layers.pool3d(input, pool_size=-1, pool_type='max', pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCDHW")
+:alias_main: paddle.nn.functional.pool3d
+:alias: paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d
+:old_api: paddle.fluid.layers.pool3d
+
+
+
该OP使用上述输入参数的池化配置,为三维空间池化操作,根据 ``input`` ,池化核大小 ``pool_size`` ,池化类型 ``pool_type`` ,步长 ``pool_stride`` 和填充 ``pool_padding`` 等参数计算输出。
输入 ``input`` 和输出(Out)采用NCDHW或NDHWC格式,其中N是批大小,C是通道数,D,H和W分别是特征的深度,高度和宽度。
diff --git a/doc/fluid/api_cn/layers_cn/pow_cn.rst b/doc/fluid/api_cn/layers_cn/pow_cn.rst
index 0da2cf5c4df954c5d158a98716338f7d252bf14f..40eaf542138527856d25a002f16a4cf29c891f47 100644
--- a/doc/fluid/api_cn/layers_cn/pow_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/pow_cn.rst
@@ -3,22 +3,25 @@
pow
-------------------------------
-.. py:function:: paddle.fluid.layers.pow(x, factor=1.0, name=None)
+.. py:function:: paddle.pow(x, exponent, name=None)
+
+
+
该OP是指数激活算子:
.. math::
- out = x^{factor}
+ out = x^{exponent}
**注意:如果需要对输入进行 elementwise_pow 操作,请查使用** :ref:`cn_api_fluid_layers_elementwise_pow` 。
参数:
- - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` ,数据类型为 ``float32`` 或 ``float64`` 。
- - **factor** (float32|Variable,可选)- ``float32`` 或形状为[1]的 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``。Pow OP的指数因子。默认值:1.0。
+ - **x** (Variable)- 多维 ``Variable``,数据类型为 ``float32`` 或 ``float64`` 。
+ - **exponent** (float32|Variable)- 指数因子,``float32`` 或形状为[1]的 ``Variable``,数据类型为 ``float32``。
- **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。
-返回:维度与输入 `x` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。
+返回:维度与输入 `x` 相同的 ``Variable``,数据类型与 ``x`` 相同。
返回类型:Variable。
@@ -27,18 +30,23 @@ pow
.. code-block:: python
- import paddle.fluid as fluid
-
- x = fluid.layers.data(name="x", shape=[3,10,32,32], dtype="float32")
-
- # example 1: argument factor is float
- y_1 = fluid.layers.pow(x, factor=2.0)
- # y_1 is x^{2.0}
+ import paddle
+ import numpy as np
+ paddle.enable_imperative()
+
+ # example 1: exponent is a float
+ x_data = np.array([1, 2, 3])
+ exponent = 2
+ x = paddle.imperative.to_variable(x_data)
+ res = paddle.pow(x, exponent)
+ print(res.numpy()) # [1 4 9]
+
+ # example 2: exponent is a Variable
+ exponent = paddle.fill_constant(shape=[1], value=2, dtype='float32')
+ res = paddle.pow(x, exponent)
+ print(res.numpy()) # [1 4 9]
- # example 2: argument factor is Variable
- factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
- y_2 = fluid.layers.pow(x, factor=factor_tensor)
- # y_2 is x^{2.0}
diff --git a/doc/fluid/api_cn/layers_cn/prelu_cn.rst b/doc/fluid/api_cn/layers_cn/prelu_cn.rst
index 81e526344d2df1e10c7995bf07749b6fa4b972ea..b1ea4cfb569fac4c885b8292ba07f022886e0934 100644
--- a/doc/fluid/api_cn/layers_cn/prelu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/prelu_cn.rst
@@ -3,10 +3,13 @@
prelu
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.prelu(x, mode, param_attr=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
等式:
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/prior_box_cn.rst b/doc/fluid/api_cn/layers_cn/prior_box_cn.rst
index 19998e3a8448e403219c07204530ad1e0c852f7a..85383fbbe6d7f3a60d449c0db069967a12606785 100644
--- a/doc/fluid/api_cn/layers_cn/prior_box_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/prior_box_cn.rst
@@ -4,6 +4,12 @@ prior_box
-------------------------------
.. py:function:: paddle.fluid.layers.prior_box(input,image,min_sizes=None,max_sizes=None,aspect_ratios=[1.0],variance=[0.1,0.1,0.2,0.2],flip=False,clip=False,steps=[0.0,0.0],offset=0.5,name=None,min_max_aspect_ratios_order=False)
+:alias_main: paddle.nn.functional.prior_box
+:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
+:old_api: paddle.fluid.layers.prior_box
+
+
+
该OP为SSD(Single Shot MultiBox Detector)算法生成候选框。输入的每个位产生N个候选框,N由min_sizes,max_sizes和aspect_ratios的数目决定,候选框的尺寸在(min_size,max_size)之间,该尺寸根据aspect_ratios在序列中生成。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst
index 18466f5f0911b65d02752986a36043455563c596..43221ea069434bd83fd066e6f1091ca9332f2e7f 100644
--- a/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/prroi_pool_cn.rst
@@ -5,6 +5,12 @@ prroi_pool
.. py:function:: paddle.fluid.layers.prroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None)
+:alias_main: paddle.nn.functional.prroi_pool
+:alias: paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool
+:old_api: paddle.fluid.layers.prroi_pool
+
+
+
PRROIPool运算
精确区域池化方法(Precise region of interest pooling,也称为PRROIPooling)是对输入的 "感兴趣区域"(RoI)执行插值处理,将离散的特征图数据映射到一个连续空间,使用二重积分再求均值的方式实现Pooling。
@@ -28,10 +34,18 @@ PRROIPool运算
.. code-block:: python
+ ## prroi_pool without batch_roi_num
import paddle.fluid as fluid
- x = fluid.layers.data(name='x', shape=[490, 28, 28], dtype='float32')
- rois = fluid.layers.data(name='rois', shape=[4], lod_level=1, dtype='float32')
- pool_out = fluid.layers.prroi_pool(x, rois, 10, 1.0, 7, 7)
+ x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
+ rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
+ pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)
+
+ ## prroi_pool with batch_roi_num
+ batchsize=4
+ x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
+ rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
+ batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
+ pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
diff --git a/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst
index 329585766688eeff43109760e2113c90387a7fa9..92a246f63c4c04e4955322e73359398f03dd17c6 100644
--- a/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/psroi_pool_cn.rst
@@ -5,6 +5,12 @@ psroi_pool
.. py:function:: paddle.fluid.layers.psroi_pool(input, rois, output_channels, spatial_scale, pooled_height, pooled_width, name=None)
+:alias_main: paddle.nn.functional.psroi_pool
+:alias: paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool
+:old_api: paddle.fluid.layers.psroi_pool
+
+
+
**注意 rois必须为2维LoDTensor,lod_level为1**
该OP执行PSROIPooling运算,是位置敏感的感兴趣区域池化方法(Position sensitive region of interest pooling,也称为PSROIPooling)。输入input是位置敏感的评分图,输入rois是感兴趣区域的位置坐标。PSROIPooling不同于普通ROIPooling的地方在于,输入input特征图的不同通道会跟输出特征图上的位置区域相关联,该方法是在R-FCN模型中首次提出来的,更多详细信息请参阅 https://arxiv.org/abs/1605.06409。
diff --git a/doc/fluid/api_cn/layers_cn/py_func_cn.rst b/doc/fluid/api_cn/layers_cn/py_func_cn.rst
index dca98b091f3c13f32607009a8bf0c8e41baf615b..79d11adef7a0cf8abfd256ad8d81a58945ab7d86 100644
--- a/doc/fluid/api_cn/layers_cn/py_func_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/py_func_cn.rst
@@ -3,10 +3,13 @@
py_func
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
PaddlePaddle Fluid通过py_func在Python端注册OP。py_func的设计原理在于Paddle中的LodTensor与numpy数组可以方便的互相转换,从而可使用Python中的numpy API来自定义一个Python OP。
该自定义的Python OP的前向函数是 ``func``, 反向函数是 ``backward_func`` 。 Paddle将在前向部分调用 ``func`` ,并在反向部分调用 ``backward_func`` (如果 ``backward_func`` 不是None)。 ``x`` 为 ``func`` 的输入,必须为LoDTensor类型; ``out`` 为 ``func`` 的输出, 既可以是LoDTensor类型, 也可以是numpy数组。
diff --git a/doc/fluid/api_cn/layers_cn/py_reader_cn.rst b/doc/fluid/api_cn/layers_cn/py_reader_cn.rst
index 602a1f8a254e27b95d6fd95fcdec514791f60af2..2f8f6f631479e9220221535b4ead4d0e753ead35 100644
--- a/doc/fluid/api_cn/layers_cn/py_reader_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/py_reader_cn.rst
@@ -3,10 +3,13 @@
py_reader
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True)
+:api_attr: 声明式编程模式(静态图)
+
+
+
创建一个在Python端提供数据的reader
@@ -73,70 +76,69 @@ py_reader
.. code-block:: python
- import paddle
- import paddle.fluid as fluid
- import paddle.dataset.mnist as mnist
-
- def network(reader):
- img, label = fluid.layers.read_file(reader)
- # 用户自定义网络,此处以softmax回归为例
- predict = fluid.layers.fc(input=img, size=10, act='softmax')
- loss = fluid.layers.cross_entropy(input=predict, label=label)
- return fluid.layers.mean(loss)
-
- # 新建 train_main_prog 和 train_startup_prog
- train_main_prog = fluid.Program()
- train_startup_prog = fluid.Program()
- with fluid.program_guard(train_main_prog, train_startup_prog):
- # 使用 fluid.unique_name.guard() 实现与test program的参数共享
- with fluid.unique_name.guard():
- train_reader = fluid.layers.py_reader(capacity=64,
- shapes=[(-1, 1, 28, 28), (-1, 1)],
- dtypes=['float32', 'int64'],
+ import paddle
+ import paddle.fluid as fluid
+ import paddle.dataset.mnist as mnist
+
+ def network(reader):
+ img, label = fluid.layers.read_file(reader)
+ # 用户自定义网络,此处以softmax回归为例
+ predict = fluid.layers.fc(input=img, size=10, act='softmax')
+ loss = fluid.layers.cross_entropy(input=predict, label=label)
+ return fluid.layers.mean(loss)
+
+ # 新建 train_main_prog 和 train_startup_prog
+ train_main_prog = fluid.Program()
+ train_startup_prog = fluid.Program()
+ with fluid.program_guard(train_main_prog, train_startup_prog):
+ # 使用 fluid.unique_name.guard() 实现与test program的参数共享
+ with fluid.unique_name.guard():
+ train_reader = fluid.layers.py_reader(capacity=64,
+ shapes=[(-1, 1, 28, 28), (-1, 1)],
+ dtypes=['float32', 'int64'],
name='train_reader')
- train_reader.decorate_paddle_reader(
- paddle.reader.shuffle(paddle.batch(mnist.train(),
- batch_size=5),
+ train_reader.decorate_paddle_reader(
+ paddle.reader.shuffle(paddle.batch(mnist.train(),
+ batch_size=5),
buf_size=500))
- train_loss = network(train_reader) # 一些网络定义
- adam = fluid.optimizer.Adam(learning_rate=0.01)
- adam.minimize(train_loss)
-
- # Create test_main_prog and test_startup_prog
- test_main_prog = fluid.Program()
- test_startup_prog = fluid.Program()
- with fluid.program_guard(test_main_prog, test_startup_prog):
- # 使用 fluid.unique_name.guard() 实现与train program的参数共享
- with fluid.unique_name.guard():
- test_reader = fluid.layers.py_reader(capacity=32,
- shapes=[(-1, 1, 28, 28), (-1, 1)],
- dtypes=['float32', 'int64'],
- name='test_reader')
- test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
-
- test_loss = network(test_reader)
-
- fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
- fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
-
- train_exe = fluid.ParallelExecutor(use_cuda=True,
- loss_name=train_loss.name, main_program=train_main_prog)
- test_exe = fluid.ParallelExecutor(use_cuda=True,
- loss_name=test_loss.name, main_program=test_main_prog)
- for epoch_id in range(10):
- train_reader.start()
- try:
- while True:
- train_exe.run(fetch_list=[train_loss.name])
- except fluid.core.EOFException:
- train_reader.reset()
-
- test_reader.start()
- try:
- while True:
- test_exe.run(fetch_list=[test_loss.name])
- except fluid.core.EOFException:
- test_reader.reset()
+ train_loss = network(train_reader) # 一些网络定义
+ adam = fluid.optimizer.Adam(learning_rate=0.01)
+ adam.minimize(train_loss)
+
+ # Create test_main_prog and test_startup_prog
+ test_main_prog = fluid.Program()
+ test_startup_prog = fluid.Program()
+ with fluid.program_guard(test_main_prog, test_startup_prog):
+ # 使用 fluid.unique_name.guard() 实现与train program的参数共享
+ with fluid.unique_name.guard():
+ test_reader = fluid.layers.py_reader(capacity=32,
+ shapes=[(-1, 1, 28, 28), (-1, 1)],
+ dtypes=['float32', 'int64'],
+ name='test_reader')
+ test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
+ test_loss = network(test_reader)
+
+ fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
+ fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
+
+ train_exe = fluid.ParallelExecutor(use_cuda=True,
+ loss_name=train_loss.name, main_program=train_main_prog)
+ test_exe = fluid.ParallelExecutor(use_cuda=True,
+ loss_name=test_loss.name, main_program=test_main_prog)
+ for epoch_id in range(10):
+ train_reader.start()
+ try:
+ while True:
+ train_exe.run(fetch_list=[train_loss.name])
+ except fluid.core.EOFException:
+ train_reader.reset()
+
+ test_reader.start()
+ try:
+ while True:
+ test_exe.run(fetch_list=[test_loss.name])
+ except fluid.core.EOFException:
+ test_reader.reset()
diff --git a/doc/fluid/api_cn/layers_cn/random_crop_cn.rst b/doc/fluid/api_cn/layers_cn/random_crop_cn.rst
index cb00c7c38ac0da9a1ad1de816e8fbd82ea34116c..e9a229030384d8a9dd28ca0308654510f2326008 100644
--- a/doc/fluid/api_cn/layers_cn/random_crop_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/random_crop_cn.rst
@@ -5,6 +5,12 @@ random_crop
.. py:function:: paddle.fluid.layers.random_crop(x, shape, seed=None)
+:alias_main: paddle.nn.functional.random_crop
+:alias: paddle.nn.functional.random_crop,paddle.nn.functional.extension.random_crop
+:old_api: paddle.fluid.layers.random_crop
+
+
+
该操作对batch中每个实例进行随机裁剪,即每个实例的裁剪位置不同,裁剪位置由均匀分布随机数生成器决定。所有裁剪后的实例都具有相同的维度,由 ``shape`` 参数决定。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/range_cn.rst b/doc/fluid/api_cn/layers_cn/range_cn.rst
index d2a56c31a807fc7732d9903c1e5ab7ad1faaf5e1..519f5e76f72b649cb924adc6c00342b7b5c54929 100644
--- a/doc/fluid/api_cn/layers_cn/range_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/range_cn.rst
@@ -3,30 +3,32 @@
range
-------------------------------
-.. py:function:: paddle.fluid.layers.range(start, end, step, dtype)
+.. py:function:: paddle.fluid.layers.range(start, end, step, dtype, name=None)
-该API根据step均匀分隔给定数值区间[start, end),并返回该分隔结果。
+注意:推荐使用 paddle.arange
-参数:
- - **start** (float32 | float64 | int32 | int64 | Variable) - 区间起点,且区间包括此值, 当类型是Variable时,是shape为 `[1]` 的1-D Tensor。
- - **end** (float32 | float64 | int32 | int64 | Variable) - 区间终点,通常区间不包括此值。但当step不是整数,且浮点数取整会影响输出的长度时例外。
- - **step** (float32 | float64 | int32 | int64 | Variable) - 均匀分割的步长。
- - **dtype** (str | core.VarDesc.VarType) - 输出Tensor的数据类型,可为 `'float32'`, `'float64'`, `'int32'`, `'int64'` 。
+该OP返回以步长 ``step`` 均匀分隔给定数值区间[``start``, ``end``)的1-D Tensor,数据类型为 ``dtype``。
+
+当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值epsilon,使边界可以更加明确。
-返回:均匀分割给定数值区间后得到的1-D Tensor, 数据类型为输入 `dtype` 。
+参数:
+ - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。
+ - **end** (float|int|Tensor) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。
+ - **step** (float|int|Tensor) - 均匀分割的步长。当 ``step`` 类型是Tensor时,是形状为[1]且数据类型为int32、int64、float32、float64的Tensor。
+ - **dtype** (str|np.dtype|core.VarDesc.VarType) - 输出Tensor的数据类型,支持int32、int64、float32、float64。
+ - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。
-返回类型:Variable
+返回:
+ Tensor: 以步长 ``step`` 均匀分割给定数值区间[``start``, ``end``)后得到的1-D Tensor, 数据类型为 ``dtype`` 。
+抛出异常:
+ - ``TypeError`` - 如果 ``dtype`` 不是int32、int64、float32、float64。
-**代码示例**:
+**代码示例**:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.range(0, 10, 2, 'int32')
-
-
-
-
-
+ # [0, 2, 4, 6, 8]
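A small NumPy illustration of the epsilon advice above (NumPy's ``arange`` uses the same half-open convention): with floating-point arguments, rounding of ``(end - start) / step`` decides whether the value nearest ``end`` is produced, so nudging ``end`` by a tiny epsilon makes the intended boundary explicit.

.. code-block:: python

    import numpy as np

    print(np.arange(0.0, 0.3, 0.1))          # the boundary value 0.3 is excluded here
    print(np.arange(0.0, 0.3 + 1e-6, 0.1))   # the epsilon makes its inclusion explicit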
diff --git a/doc/fluid/api_cn/layers_cn/rank_cn.rst b/doc/fluid/api_cn/layers_cn/rank_cn.rst
index c6cad3995f7149364267800367501e5bc2ecdaa0..0d3aee591f86e84bc4a73fe40e2c31f434b558fa 100644
--- a/doc/fluid/api_cn/layers_cn/rank_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/rank_cn.rst
@@ -5,6 +5,12 @@ rank
.. py:function:: paddle.fluid.layers.rank(input)
+:alias_main: paddle.rank
+:alias: paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank
+:old_api: paddle.fluid.layers.rank
+
+
+
该OP用于计算输入Tensor的维度(秩)。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst b/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst
index ee8531b88befc7ef1d588188d3892fb10d5a54fc..3f6f5b2a83df84fa477006499891176be909b1bf 100644
--- a/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/rank_loss_cn.rst
@@ -5,6 +5,12 @@ rank_loss
.. py:function:: paddle.fluid.layers.rank_loss(label, left, right, name=None)
+:alias_main: paddle.nn.functional.rank_loss
+:alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss
+:old_api: paddle.fluid.layers.rank_loss
+
+
+
该OP实现了RankNet模型中的排序损失层。RankNet是一种文档对(pairwise)排序模型,训练样本由一对文档(假设用A、B来表示)组成。标签(假设用P来表示)表示A的排名是否高于B。更多详情请参考:`RankNet `_
排序损失层有三个输入: :math:`o_i` 、 :math:`o_j` 和 :math:`\tilde{P_{ij}}` ,输入分别表示RankNet模型对文档A、B的输出得分和标签P的值;排序损失层的输入是批输入数据(批大小大于等于1);标签P的取值可以为: {0, 1} 或 {0, 0.5, 1} ,其中,0.5表示输入文档对排序相同。输入数据的排序损失 :math:`C_{i,j}` 计算过程如下:
diff --git a/doc/fluid/api_cn/layers_cn/read_file_cn.rst b/doc/fluid/api_cn/layers_cn/read_file_cn.rst
index 688a52d359c9553ce3d448ea7315980625f4d642..c2ac46e1465712cec6c6c5d7645ce8d4f711cb9f 100644
--- a/doc/fluid/api_cn/layers_cn/read_file_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/read_file_cn.rst
@@ -3,10 +3,13 @@
read_file
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.read_file(reader)
+:api_attr: 声明式编程模式(静态图)
+
+
+
从给定的reader中读取数据
reader是一个Variable,它可以是由函数fluid.layers.py_reader()生成的reader,或者是由函数fluid.layers.double_buffer()生成的装饰Variable。
diff --git a/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst b/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst
index fa65ac200f513bc9fbfe088d21ce8b6574feefba..a76a495a5112dc3404510b76bc310ad0b0f78e37 100644
--- a/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reciprocal_cn.rst
@@ -5,6 +5,12 @@ reciprocal
.. py:function:: paddle.fluid.layers.reciprocal(x, name=None)
+:alias_main: paddle.reciprocal
+:alias: paddle.reciprocal,paddle.tensor.reciprocal,paddle.tensor.math.reciprocal
+:old_api: paddle.fluid.layers.reciprocal
+
+
+
reciprocal 对输入Tensor取倒数
@@ -23,17 +29,14 @@ reciprocal 对输入Tensor取倒数
.. code-block:: python
- import paddle.fluid as fluid
- data = fluid.layers.fill_constant(shape=[2], value=4, dtype='float32') #data=[4.0, 4.0]
- result = fluid.layers.reciprocal(data) # result=[0.25, 0.25]
-
-
-
-
-
-
-
+ import paddle
+ import numpy as np
+ paddle.enable_imperative()
+ x_data = np.array([1, 2, 3, 4]).astype(np.float32)
+ x = paddle.imperative.to_variable(x_data)
+ res = paddle.reciprocal(x)
+ print(res.numpy())
diff --git a/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst
index dbadb7a968de0a32021b998d7c5c2756e65941be..88a1ba9e3f9081ec3772e88481a15864c41ee86e 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_all_cn.rst
@@ -5,6 +5,12 @@ reduce_all
.. py:function:: paddle.fluid.layers.reduce_all(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_all
+:alias: paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all
+:old_api: paddle.fluid.layers.reduce_all
+
+
+
该OP是对指定维度上的Tensor元素进行与逻辑(&)计算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst
index afe04344a036d160161197983e0a7e6683791a65..13a23b8139886d391641a6a9068879d51b2fd1ee 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_any_cn.rst
@@ -5,6 +5,12 @@ reduce_any
.. py:function:: paddle.fluid.layers.reduce_any(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_any
+:alias: paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any
+:old_api: paddle.fluid.layers.reduce_any
+
+
+
该OP是对指定维度上的Tensor元素进行或逻辑(|)计算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst
index 723012415b23d80233267a2badd5af6c3cd343f9..37cc3db57a4d12ac01e43fce0e3b9cea8e609b17 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_max_cn.rst
@@ -5,6 +5,12 @@ reduce_max
.. py:function:: paddle.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_max
+:alias: paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max
+:old_api: paddle.fluid.layers.reduce_max
+
+
+
该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst
index b7b732daded03fdd7d0a334d12d3a1edfdbf3d22..c94fed0a80b6a766a57f10ad6c5357428b0c0bd4 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_mean_cn.rst
@@ -5,6 +5,12 @@ reduce_mean
.. py:function:: paddle.fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_mean
+:alias: paddle.reduce_mean,paddle.tensor.reduce_mean,paddle.tensor.stat.reduce_mean
+:old_api: paddle.fluid.layers.reduce_mean
+
+
+
该OP是对指定维度上的Tensor元素进行平均值计算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst
index 509768608c270d5d80d6a1f9084f0c5d66c124b8..2517c935e483cff4a2c7686556e6583126e29bb4 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_min_cn.rst
@@ -5,6 +5,12 @@ reduce_min
.. py:function:: paddle.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_min
+:alias: paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min
+:old_api: paddle.fluid.layers.reduce_min
+
+
+
该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst
index b420857b8131d8e4d4aa3f4b5ff7c64bc92d1b1c..c0a11bc9204a431a4fba0df588189b9d604dcf97 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_prod_cn.rst
@@ -5,11 +5,17 @@ reduce_prod
.. py:function:: paddle.fluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_prod
+:alias: paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod
+:old_api: paddle.fluid.layers.reduce_prod
+
+
+
该OP是对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。
参数:
- **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。
- - **dim** (list | int ,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。
+ - **dim** (int|list|tuple,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim[i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。
- **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。
- **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
diff --git a/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst b/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst
index ddabbeab762ebaa5d5835c3c0f18adb4333b86d6..da06baf68be2370b27a1162e24882a615b878589 100644
--- a/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reduce_sum_cn.rst
@@ -5,6 +5,12 @@ reduce_sum
.. py:function:: paddle.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None)
+:alias_main: paddle.reduce_sum
+:alias: paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum
+:old_api: paddle.fluid.layers.reduce_sum
+
+
+
该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/relu6_cn.rst b/doc/fluid/api_cn/layers_cn/relu6_cn.rst
index bd1ea8709ae0cd1bbc8070a58521851177641ea0..05c83c7e5e48c6fda17a075dd8bbd13f00c62c02 100644
--- a/doc/fluid/api_cn/layers_cn/relu6_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/relu6_cn.rst
@@ -5,6 +5,12 @@ relu6
.. py:function:: paddle.fluid.layers.relu6(x, threshold=6.0, name=None)
+:alias_main: paddle.nn.functional.relu6
+:alias: paddle.nn.functional.relu6,paddle.nn.functional.activation.relu6
+:old_api: paddle.fluid.layers.relu6
+
+
+
relu6激活函数
.. math:: out=min(max(0, x), threshold)
diff --git a/doc/fluid/api_cn/layers_cn/relu_cn.rst b/doc/fluid/api_cn/layers_cn/relu_cn.rst
index f7cb337b34b1ddff8ac6541d6d75a7bef3eaaf2b..6df2e05bea36cf96f6990269d6bb77952f15549e 100644
--- a/doc/fluid/api_cn/layers_cn/relu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/relu_cn.rst
@@ -5,6 +5,9 @@ relu
.. py:function:: paddle.fluid.layers.relu(x, name=None)
+
+
+
ReLU(Rectified Linear Unit)激活函数
.. math:: Out=max(0,x)
diff --git a/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst b/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst
index 677b110ce2bebf8cc6c6462de28b72e7c0448eeb..7e67ee31003a74f34f64e9403336732388ca0ed2 100644
--- a/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reorder_lod_tensor_by_rank_cn.rst
@@ -6,6 +6,9 @@ reorder_lod_tensor_by_rank
.. py:function:: paddle.fluid.layers.reorder_lod_tensor_by_rank(x, rank_table)
+
+
+
该OP根据 ``rank_table`` 中提供的 ``LoDRankTable`` 类型的顺序信息来实现对 ``X`` 的重新排列。
接口参数 ``X`` 是由多个序列(Sequence)组成的的一个批序列(Batch of Sequences), ``rank_table`` 存储着对batch中序列重新排列的 ``LoDRankTable`` 类型的顺序信息。
@@ -20,13 +23,8 @@ reorder_lod_tensor_by_rank
注意:该OP对 ``X`` 进行的排序所依据的 ``LoDRankTable`` 不一定是在 ``X`` 的基础上得出来的。它可以由其他不同的序列得出,并由该OP依据这个 ``LoDRankTable`` 来对 ``X`` 排序。
参数:
- - **x** (Variable) - 待根据提供的 ``rank_table`` 进行排序的LoDTensor
- - **rank_table** (Variable) - 提供对 ``x`` 重新排列的 ``LoDRankTable`` 类型的顺序信息,构造方法举例如下:
-
-.. code-block:: python
-
- rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
- rank_table = fluid.layers.control_flow.lod_rank_table(rank_data)
+ - **x** (Variable) - 待根据提供的 ``rank_table`` 进行排序的LoDTensor。
+ - **rank_table** (Variable) - 提供对 ``x`` 重新排列的 ``LoDRankTable`` 类型的顺序信息。
返回: 重新排列后的LoDTensor
@@ -37,15 +35,33 @@ reorder_lod_tensor_by_rank
.. code-block:: python
+
+ import numpy as np
import paddle.fluid as fluid
- data_desc = (['input', [9], 0], ['ref', [5], 1])
- data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1])
- rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
- table = fluid.layers.control_flow.lod_rank_table(rank_data)
+
+ rank_data = fluid.layers.data(name='rank_data', shape=[5], dtype='float32', lod_level=2)
+ table = fluid.layers.control_flow.lod_rank_table(rank_data, level=1)
+
+ data = fluid.layers.data(name='data', shape=[9], lod_level=2)
new_data = fluid.layers.reorder_lod_tensor_by_rank(
x=data, rank_table=table)
+ place=fluid.CPUPlace()
+ exe = fluid.Executor(place)
+ exe.run(fluid.default_startup_program())
+
+ rank_tensor = fluid.create_lod_tensor(np.random.random([14,5]).astype("float32"), [[4,1], [3, 2, 2, 3, 4]], place)
+
+ data_ndarray = np.random.random([27, 9]).astype("float32")
+ data_lod = [[1, 2, 2, 4, 4], [2, 2, 4, 2, 2, 2, 1, 1, 2, 2, 4, 2, 1]]
+ data_tensor = fluid.create_lod_tensor(data_ndarray, data_lod, place)
+
+ out = exe.run(fluid.default_main_program(),feed={'data':data_tensor, 'rank_data':rank_tensor}, fetch_list=[new_data], return_numpy=False)
+ print(out[0])
+ # lod: {{0, 4, 5, 9, 11, 13}{0, 2, 6, 8, 9, 11, 13, 14, 15, 17, 19, 23, 25, 27}}
+ #shape: [27, 9]
+
diff --git a/doc/fluid/api_cn/layers_cn/reshape_cn.rst b/doc/fluid/api_cn/layers_cn/reshape_cn.rst
index c0c8f256e2c76a2a4ea550dcb884bbc1e3832a1e..e2a892314e8361cc76f568014bc32cb0fbb8124c 100644
--- a/doc/fluid/api_cn/layers_cn/reshape_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reshape_cn.rst
@@ -5,6 +5,7 @@ reshape
.. py:function:: paddle.fluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
+
该OP在保持输入 ``x`` 数据不变的情况下,改变 ``x`` 的形状。
目标形状可由 ``shape`` 或 ``actual_shape`` 给出。当两个属性同时被指定时,``actual_shape`` 的优先级高于 ``shape`` ,但此时 ``shape`` 只能是整数列表或元组,且在编译时仍然应该正确地设置 ``shape`` 以保证形状推断。
@@ -25,26 +26,21 @@ reshape
2. 给定一个形状为[2,4,6]的三维张量x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且x的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从x的元素总数和剩余维度推断出来的。
3. 给定一个形状为[2,4,6]的三维张量x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且x的数据保持不变。在这种情况下,0对应位置的维度值将从x的对应维数中复制,-1对应位置的维度值由x的元素总数和剩余维度推断出来。
-**注意:参数** ``actual_shape`` **之后将被舍弃,只用参数** ``shape`` **来表示目标形状。**
+.. warning::
+    参数 ``actual_shape`` 之后将被舍弃,只用参数 ``shape`` 来表示目标形状。
参数:
- - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。
- - **shape** (list|tuple|Variable)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``shape`` 的类型是 ``Variable``,则是1-D的 ``Tensor`` 或 ``LoDTensor``。
- - **actual_shape** (Variable,可选)- 1-D ``Tensor`` 或 ``LoDTensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。
+ - **x** (Tensor)- N-D ``Tensor``,数据类型为 ``float32``,``float64``,``int32``,或 ``int64``。
+ - **shape** (list|tuple|Tensor)- 数据类型是 ``int32`` 。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple, 它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor``。
+ - **actual_shape** (Tensor,可选)- 1-D ``Tensor``,默认值:`None`。如果 ``actual_shape`` 被提供,``actual_shape`` 具有比 ``shape`` 更高的优先级,此时 ``shape`` 只能是整数列表或元组。更新提示:``actual_shape`` 在未来的版本中将被舍弃,并用 ``shape`` 代替。
- **act** (str,可选)- 对形状改变后的输入变量做非线性激活操作,激活函数类型可以参考 :ref:`api_guide_activations` 。默认值: ``None``。
- **inplace** (bool,可选)- 如果 ``inplace`` 为 ``True``,则 ``layers.reshape`` 的输入和输出是同一个变量,否则 ``layers.reshape`` 的输入和输出是不同的变量。默认值:``False``。请注意,如果 ``x`` 是多个OP的输入,则 ``inplace`` 必须为False。
- **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。
-返回:多维 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``input`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。
-
-返回类型:Variable。
+返回:``Tensor``,改变形状后的 ``Tensor``,数据类型与 ``x`` 相同。如果 ``inplace`` 为 ``False``,则返回一个新的变量,否则将改变输入变量 ``x`` 自身。如果 ``act`` 为 ``None``,则直接返回形状改变后的变量,否则返回经过激活函数后的变量。
-抛出异常:
- - :code:`TypeError`:``actual_shape`` 的类型应该是 Variable 或 None。
- - :code:`TypeError`:``starts`` 的类型应该是list、tuple 或 Variable。
- - :code:`ValueError`:``shape`` 中至多有一个元素可以是-1。
- - :code:`ValueError`:``shape`` 中的元素为0时,对应的维度应该小于等于``x``的维度。
- - :code:`ValueError`:``shape`` 中的元素除了-1之外,都应该是非负值。
**代码示例**
@@ -53,15 +49,15 @@ reshape
import paddle.fluid as fluid
# example 1:
- # attr shape is a list which doesn't contain tensor Variable.
- data_1 = fluid.layers.data(
- name='data_1', shape=[2, 4, 6], dtype='float32')
+ # attr shape is a list which doesn't contain Tensors.
+ data_1 = fluid.data(
+ name='data_1', shape=[2, 4, 6], dtype='float32')
reshaped_1 = fluid.layers.reshape(
- x=data_1, shape=[-1, 0, 3, 2], inplace=True)
+ x=data_1, shape=[-1, 0, 3, 2], inplace=True)
# the shape of reshaped_1 is [2,4,3,2].
# example 2:
- # attr shape is a list which contains tensor Variable.
+ # attr shape is a list which contains Tensors.
data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
dim = fluid.layers.fill_constant([1], "int32", 5)
reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
@@ -69,7 +65,7 @@ reshape
# example 3:
data_3 = fluid.data(
- name="data_3", shape=[2,4,6], dtype='float32')
+ name="data_3", shape=[2,4,6], dtype='float32')
reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
# the shape of reshaped_3 is [6,8].
diff --git a/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst b/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst
index 233364fa74c22e1cc60e374178ae6f8637ef4420..d0fd0a3f2b04cbf90fe6d4944929b9c93cbaec5f 100644
--- a/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/resize_bilinear_cn.rst
@@ -5,6 +5,12 @@ resize_bilinear
.. py:function:: paddle.fluid.layers.resize_bilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW')
+:alias_main: paddle.nn.functional.resize_bilinear
+:alias: paddle.nn.functional.resize_bilinear,paddle.nn.functional.vision.resize_bilinear
+:old_api: paddle.fluid.layers.resize_bilinear
+
+
+
**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。
该OP应用双线性插值法调整输入图片的大小,输出形状按优先级由actual_shape、out_shape和scale指定。
diff --git a/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst b/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst
index fc5b282119f1ab85e413493c9fa201623f46dffd..2d4ff2abf242b745703200a0fb7966486f8faefc 100644
--- a/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/resize_nearest_cn.rst
@@ -5,6 +5,12 @@ resize_nearest
.. py:function:: paddle.fluid.layers.resize_nearest(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, data_format='NCHW')
+:alias_main: paddle.nn.functional.resize_nearest
+:alias: paddle.nn.functional.resize_nearest,paddle.nn.functional.vision.resize_nearest
+:old_api: paddle.fluid.layers.resize_nearest
+
+
+
该OP对输入图片进行大小调整,在高度方向宽度方向进行最邻近插值(nearest neighbor interpolation)操作。
输出形状按优先级顺序依据 ``actual_shape`` , ``out_shape`` 和 ``scale`` 而定。
diff --git a/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst b/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst
index a1425cbddf9e9989963c4e77c15ae40fae3474d4..58c62c6ca849b47d12ff1188b88e3cc3797030d2 100644
--- a/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/resize_trilinear_cn.rst
@@ -5,6 +5,12 @@ resize_trilinear
.. py:function:: paddle.fluid.layers.resize_trilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCDHW')
+:alias_main: paddle.nn.functional.resize_trilinear
+:alias: paddle.nn.functional.resize_trilinear,paddle.nn.functional.vision.resize_trilinear
+:old_api: paddle.fluid.layers.resize_trilinear
+
+
+
**注意:** 参数 ``actual_shape`` 将被弃用,请使用 ``out_shape`` 替代。
该层对输入进行放缩,基于给定的由 ``actual_shape`` , ``out_shape`` , ``scale`` 确定的输出shape,进行三线性插值。三线性插值是包含三个参数的线性插值方程(D方向,H方向,W方向),在一个3D格子上进行三个方向的线性插值。更多细节,请参考维基百科:https://en.wikipedia.org/wiki/Trilinear_interpolation
diff --git a/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst b/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst
index badd3bc07451b013f7130178ef69b680a5e4763a..426ea26b3a675bf121e2ff8e0c8495e715878f52 100644
--- a/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/retinanet_detection_output_cn.rst
@@ -5,6 +5,12 @@ retinanet_detection_output
.. py:function:: paddle.fluid.layers.retinanet_detection_output(bboxes, scores, anchors, im_info, score_threshold=0.05, nms_top_k=1000, keep_top_k=100, nms_threshold=0.3, nms_eta=1.0)
+:alias_main: paddle.nn.functional.retinanet_detection_output
+:alias: paddle.nn.functional.retinanet_detection_output,paddle.nn.functional.vision.retinanet_detection_output
+:old_api: paddle.fluid.layers.retinanet_detection_output
+
+
+
在 `RetinaNet `_ 中,有多个 `FPN `_ 层会输出用于分类的预测值和位置回归的预测值,该OP通过执行以下步骤将这些预测值转换成最终的检测结果:
1. 在每个FPN层上,先剔除分类预测值小于score_threshold的anchor,然后按分类预测值从大到小排序,选出排名前nms_top_k的anchor,并将这些anchor与其位置回归的预测值做解码操作得到检测框。
@@ -34,27 +40,27 @@ retinanet_detection_output
import paddle.fluid as fluid
- bboxes_low = fluid.data(name='bboxes_low', shape=[1, 44, 4],
- dtype='float32')
- bboxes_high = fluid.data(name='bboxes_high', shape=[1, 11, 4],
- dtype='float32')
- scores_low = fluid.data(name='scores_low', shape=[1, 44, 10],
- dtype='float32')
- scores_high = fluid.data(name='scores_high', shape=[1, 11, 10],
- dtype='float32')
- anchors_low = fluid.data(name='anchors_low', shape=[44, 4],
- dtype='float32')
- anchors_high = fluid.data(name='anchors_high', shape=[11, 4],
- dtype='float32')
- im_info = fluid.data(name="im_info", shape=[1, 3],
- dtype='float32')
+ bboxes_low = fluid.data(
+ name='bboxes_low', shape=[1, 44, 4], dtype='float32')
+ bboxes_high = fluid.data(
+ name='bboxes_high', shape=[1, 11, 4], dtype='float32')
+ scores_low = fluid.data(
+ name='scores_low', shape=[1, 44, 10], dtype='float32')
+ scores_high = fluid.data(
+ name='scores_high', shape=[1, 11, 10], dtype='float32')
+ anchors_low = fluid.data(
+ name='anchors_low', shape=[44, 4], dtype='float32')
+ anchors_high = fluid.data(
+ name='anchors_high', shape=[11, 4], dtype='float32')
+ im_info = fluid.data(
+ name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
- bboxes=[bboxes_low, bboxes_high],
- scores=[scores_low, scores_high],
- anchors=[anchors_low, anchors_high],
- im_info=im_info,
- score_threshold=0.05,
- nms_top_k=1000,
- keep_top_k=100,
- nms_threshold=0.45,
- nms_eta=1.)
+ bboxes=[bboxes_low, bboxes_high],
+ scores=[scores_low, scores_high],
+ anchors=[anchors_low, anchors_high],
+ im_info=im_info,
+ score_threshold=0.05,
+ nms_top_k=1000,
+ keep_top_k=100,
+ nms_threshold=0.45,
+ nms_eta=1.0)
diff --git a/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst b/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst
index caa8340ae354cfc0d054d4899e4cb322884dbb1e..cd37a297b2b303429ee17ad9f2f2881245041ebe 100644
--- a/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/retinanet_target_assign_cn.rst
@@ -5,6 +5,12 @@ retinanet_target_assign
.. py:function:: paddle.fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, gt_labels, is_crowd, im_info, num_classes=1, positive_overlap=0.5, negative_overlap=0.4)
+:alias_main: paddle.nn.functional.retinanet_target_assign
+:alias: paddle.nn.functional.retinanet_target_assign,paddle.nn.functional.vision.retinanet_target_assign
+:old_api: paddle.fluid.layers.retinanet_target_assign
+
+
+
该OP是从输入anchor中找出训练检测模型 `RetinaNet `_ 所需的正负样本,并为每个正负样本分配用于分类的目标值和位置回归的目标值,同时从全部anchor的类别预测值cls_logits、位置预测值bbox_pred中取出属于各正负样本的部分。
正负样本的查找准则如下:
@@ -50,7 +56,6 @@ retinanet_target_assign
.. code-block:: python
import paddle.fluid as fluid
- import numpy as np
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
@@ -63,11 +68,11 @@ retinanet_target_assign
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
- dtype='float32')
+ dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
- dtype='float32')
+ dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
- score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num =
+ score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
diff --git a/doc/fluid/api_cn/layers_cn/reverse_cn.rst b/doc/fluid/api_cn/layers_cn/reverse_cn.rst
index 0ea877075e75037b728d908a64e0a06de1cd1804..a4a552705b1dfc6bb389434a3f0cc771232f91c3 100644
--- a/doc/fluid/api_cn/layers_cn/reverse_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/reverse_cn.rst
@@ -5,14 +5,41 @@ reverse
.. py:function:: paddle.fluid.layers.reverse(x,axis)
+:alias_main: paddle.reverse
+:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
+:old_api: paddle.fluid.layers.reverse
+
+
+
**reverse**
该OP对输入Tensor ``x`` 在指定轴 ``axis`` 上进行数据的逆序操作。
-参数:
- - **x** (Variable) - 多维Tensor,类型必须为int32,int64,float32,float64。
- - **axis** (int|tuple|list) - 指定逆序运算的轴,取值范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。如果 ``axis`` 是一个元组或列表,则在``axis`` 每个元素值所指定的轴上进行逆序运算。
+::
+
+ 示例1:
+ 输入是 LoDTensor 类型:
+ x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ axis = [0, 1]
+
+ 输出:
+ output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]
+ 示例2:
+ 输入是 LoDTensorArray 类型:
+ x = {[[0, 1], [2, 3]],
+ [[4, 5, 6]],
+ [[7], [8], [9]]}
+ axis = 0
+
+ 输出:
+ output = {[[7], [8], [9]],
+ [[4, 5, 6]],
+ [[0, 1], [2, 3]]}
+
+参数:
+ - **x** (Variable) - 输入为Tensor或LoDTensorArray,数据类型支持bool,int8,int32,int64,float32和float64。若输入是LoDTensorArray类型,则返回一个逆序的LoDTensorArray,其内部Tensor元素的次序保持不变。
+ - **axis** (int|tuple|list) - 指定逆序运算的轴,取值范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。如果 ``axis`` 是一个元组或列表,则在 ``axis`` 每个元素值所指定的轴上进行逆序运算。如果输入是LoDTensorArray类型,axis须是值为0的int,或shape为[1]的list ``[0]`` 、元组 ``(0,)`` 。
返回:逆序后的Tensor,形状、数据类型和 ``x`` 一致。
返回类型:Variable
@@ -26,3 +53,13 @@ reverse
data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
+
+ # 输入为LoDTensorArray时
+ data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
+ data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
+ tensor_array = fluid.layers.create_array(dtype='float32')
+ i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+ fluid.layers.array_write(data1, i, tensor_array)
+ fluid.layers.array_write(data2, i+1, tensor_array)
+
+ reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
diff --git a/doc/fluid/api_cn/layers_cn/rnn_cn.rst b/doc/fluid/api_cn/layers_cn/rnn_cn.rst
index bd948e12e34a336ebbf4fc4c56ee0a6e4eb04e95..75faee0a3e3975cc1403cbdec72e187fed7e36ae 100644
--- a/doc/fluid/api_cn/layers_cn/rnn_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/rnn_cn.rst
@@ -4,9 +4,12 @@ rnn
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:method:: paddle.fluid.layers.rnn(cell, inputs, initial_states=None, sequence_length=None, time_major=False, is_reverse=False, **kwargs)
+
+:api_attr: 声明式编程模式(静态图)
+
+
rnn创建一个由RNNCell :code:`cell` 指定的递归神经网络,该神经网络重复执行 :code:`cell.call()` 直至达到 :code:`inputs` 的最大长度。
diff --git a/doc/fluid/api_cn/layers_cn/roi_align_cn.rst b/doc/fluid/api_cn/layers_cn/roi_align_cn.rst
index bc9d09f185d14af07674c17de58303b486b0745a..c5c72638bb5979df1767cf4f7eb765060e245bea 100644
--- a/doc/fluid/api_cn/layers_cn/roi_align_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/roi_align_cn.rst
@@ -5,6 +5,12 @@ roi_align
.. py:function:: paddle.fluid.layers.roi_align(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, sampling_ratio=-1, name=None)
+:alias_main: paddle.nn.functional.roi_align
+:alias: paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align
+:old_api: paddle.fluid.layers.roi_align
+
+
+
**实现RoIAlign操作。**
Region of Interests align(直译:有意义、有价值选区对齐) 用于实现双线性插值,它可以将不均匀大小的输入
diff --git a/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst b/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst
index 71d75db86e8a2429b7b93ec543c3f6b60d36f0b6..1c97a18e0b7897d2b0299bc33d70f94e508531d3 100644
--- a/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/roi_perspective_transform_cn.rst
@@ -5,6 +5,12 @@ roi_perspective_transform
.. py:function:: paddle.fluid.layers.roi_perspective_transform(input, rois, transformed_height, transformed_width, spatial_scale=1.0)
+:alias_main: paddle.nn.functional.roi_perspective_transform
+:alias: paddle.nn.functional.roi_perspective_transform,paddle.nn.functional.vision.roi_perspective_transform
+:old_api: paddle.fluid.layers.roi_perspective_transform
+
+
+
该OP对RoI区域做透视变换,将不规则的RoI区域变成固定大小的矩形区域,透视变换是线性代数里面的一种基础变换。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst b/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst
index 16ff9764a7be3f9fea1c3f74434a44f44ac5ac87..616c119d22d5d91e30903d82879975b0733ace87 100644
--- a/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/roi_pool_cn.rst
@@ -5,6 +5,12 @@ roi_pool
.. py:function:: paddle.fluid.layers.roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0)
+:alias_main: paddle.nn.functional.roi_pool
+:alias: paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool
+:old_api: paddle.fluid.layers.roi_pool
+
+
+
该OP实现了roi池化操作,对非均匀大小的输入执行最大池化,以获得固定大小的特征映射(例如7*7)。
diff --git a/doc/fluid/api_cn/layers_cn/round_cn.rst b/doc/fluid/api_cn/layers_cn/round_cn.rst
index 51ebb254290985796f7a4478edd32f6944e341ee..7de4214b7e9a993490a07b76b28b62142f1d72d2 100644
--- a/doc/fluid/api_cn/layers_cn/round_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/round_cn.rst
@@ -5,6 +5,12 @@ round
.. py:function:: paddle.fluid.layers.round(x, name=None)
+:alias_main: paddle.round
+:alias: paddle.round,paddle.tensor.round,paddle.tensor.math.round
+:old_api: paddle.fluid.layers.round
+
+
+
该OP将输入中的数值四舍五入到最接近的整数数值。
diff --git a/doc/fluid/api_cn/layers_cn/row_conv_cn.rst b/doc/fluid/api_cn/layers_cn/row_conv_cn.rst
index 1bca0e610285ad94d67285599a6a4d5c58fc4782..ab16bdbfbb86e42131b331a373b8e66e2a1099c1 100644
--- a/doc/fluid/api_cn/layers_cn/row_conv_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/row_conv_cn.rst
@@ -3,10 +3,13 @@
row_conv
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,论文链接:
``_
diff --git a/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst b/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst
index df80d16cf277ca7d72dce855a1c6fc190ed4a450..3edc0ee06ede956fc25791c2c833ea6b473c2c9e 100644
--- a/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/rpn_target_assign_cn.rst
@@ -5,6 +5,12 @@ rpn_target_assign
.. py:function:: paddle.fluid.layers.rpn_target_assign(bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info, rpn_batch_size_per_im=256, rpn_straddle_thresh=0.0, rpn_fg_fraction=0.5, rpn_positive_overlap=0.7, rpn_negative_overlap=0.3, use_random=True)
+:alias_main: paddle.nn.functional.rpn_target_assign
+:alias: paddle.nn.functional.rpn_target_assign,paddle.nn.functional.extension.rpn_target_assign
+:old_api: paddle.fluid.layers.rpn_target_assign
+
+
+
该OP用于为anchors分配分类标签和回归标签,以便用这些标签对RPN进行训练。
该OP将anchors分为两种类别,正和负。根据Faster-RCNN的paper,正类别anchor包括以下两种anchor:
diff --git a/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst b/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst
index 6cd896637ba40b17b89dbd0129a345e78e7a4979..0d4a83041be454cdcd2f4e2e6d850d861d3bbbeb 100644
--- a/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/rsqrt_cn.rst
@@ -5,6 +5,12 @@ rsqrt
.. py:function:: paddle.fluid.layers.rsqrt(x, name=None)
+:alias_main: paddle.rsqrt
+:alias: paddle.rsqrt,paddle.tensor.rsqrt,paddle.tensor.math.rsqrt
+:old_api: paddle.fluid.layers.rsqrt
+
+
+
该OP为rsqrt激活函数。
注:输入x应确保为非 **0** 值,否则程序会抛异常退出。
diff --git a/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst b/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst
index 3097aaaab7eafeaa38ceaa49d718363a7fe50e4a..82ca1725079152ea0b411e64ccde808984d8a5a9 100644
--- a/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sampled_softmax_with_cross_entropy_cn.rst
@@ -5,6 +5,12 @@ sampled_softmax_with_cross_entropy
.. py:function:: paddle.fluid.layers.sampled_softmax_with_cross_entropy(logits, label, num_samples, num_true=1, remove_accidental_hits=True, use_customized_samples=False, customized_samples=None, customized_probabilities=None, seed=0)
+:alias_main: paddle.nn.functional.sampled_softmax_with_cross_entropy
+:alias: paddle.nn.functional.sampled_softmax_with_cross_entropy,paddle.nn.functional.loss.sampled_softmax_with_cross_entropy
+:old_api: paddle.fluid.layers.sampled_softmax_with_cross_entropy
+
+
+
**Sampled Softmax With Cross Entropy Operator**
对于较大的输出类,采样的交叉熵损失Softmax被广泛地用作输出层。该运算符为所有示例采样若干个样本,并计算每行采样张量的SoftMax标准化值,然后计算交叉熵损失。
@@ -37,7 +43,7 @@ sampled_softmax_with_cross_entropy
import paddle.fluid as fluid
input = fluid.layers.data(name='data', shape=[256], dtype='float32')
- label = fluid.layers.data(name='label', shape=[5], dtype='int64')
+ label = fluid.layers.data(name='label', shape=[1], dtype='int64')
fc = fluid.layers.fc(input=input, size=100)
out = fluid.layers.sampled_softmax_with_cross_entropy(
logits=fc, label=label, num_samples=25)
diff --git a/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst b/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst
index 637ba97be936623f912f8e1cb3d994bb7198ea01..098bba3e2e75a62a28ea4bd9c040a1960846f858 100644
--- a/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sampling_id_cn.rst
@@ -5,6 +5,9 @@ sampling_id
.. py:function:: paddle.fluid.layers.sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32')
+
+
+
该OP从输入的多项分布中进行采样。
参数:
diff --git a/doc/fluid/api_cn/layers_cn/scale_cn.rst b/doc/fluid/api_cn/layers_cn/scale_cn.rst
index 26b1a42fb4d6ff8465a46e9c5e50a730539ecb6e..6623f9e451b594e71b28235a54dbe858d98ff9c9 100644
--- a/doc/fluid/api_cn/layers_cn/scale_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/scale_cn.rst
@@ -5,6 +5,12 @@ scale
.. py:function:: paddle.fluid.layers.scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None)
+:alias_main: paddle.scale
+:alias: paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale
+:old_api: paddle.fluid.layers.scale
+
+
+
缩放算子。
对输入Tensor进行缩放和偏置,其公式如下:
@@ -56,7 +62,7 @@ scale
import numpy as np
inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
- scale = fluid.layers.data(name="scale", shape=[1], dtype='float32'
+ scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
append_batch_size=False)
output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)
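
As a quick illustration of the ``bias_after_scale`` switch in the signature above, here is a hedged NumPy sketch of the two scaling formulas (the helper name ``scale_ref`` is ours, not part of the API, and the semantics are assumed from the documented parameters):

.. code-block:: python

    import numpy as np

    def scale_ref(x, scale=1.0, bias=0.0, bias_after_scale=True):
        # bias_after_scale=True:  out = scale * x + bias
        # bias_after_scale=False: out = scale * (x + bias)
        x = np.asarray(x, dtype='float32')
        return scale * x + bias if bias_after_scale else scale * (x + bias)

    x = np.array([[1., 2., 3.], [4., 5., 6.]])
    print(scale_ref(x, scale=2.0, bias=1.0))                          # 2*x + 1
    print(scale_ref(x, scale=2.0, bias=1.0, bias_after_scale=False))  # 2*(x + 1)
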
diff --git a/doc/fluid/api_cn/layers_cn/scatter_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_cn.rst
index 6792679400ea02f7b6845d6e2678be0a2570cbef..38824d7fbdd7d3aa250f24e4b8185b3399f53b5b 100644
--- a/doc/fluid/api_cn/layers_cn/scatter_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/scatter_cn.rst
@@ -5,6 +5,12 @@ scatter
.. py:function:: paddle.fluid.layers.scatter(input, index, updates, name=None, overwrite=True)
+:alias_main: paddle.scatter
+:alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter
+:old_api: paddle.fluid.layers.scatter
+
+
+
该OP根据index中的索引值将updates数据更新到input中。
.. code-block:: python
diff --git a/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst
index 3a703fb7a4c04f6dabed1257fa1207472ee4c5b6..95b26e03a214a8cd04a11ef8b6803c246d9cfcb8 100644
--- a/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/scatter_nd_add_cn.rst
@@ -5,6 +5,12 @@ scatter_nd_add
.. py:function:: paddle.fluid.layers.scatter_nd_add(ref, index, updates, name=None)
+:alias_main: paddle.scatter_nd_add
+:alias: paddle.scatter_nd_add,paddle.tensor.scatter_nd_add,paddle.tensor.manipulation.scatter_nd_add
+:old_api: paddle.fluid.layers.scatter_nd_add
+
+
+
该OP通过对Variable中的单个值或切片应用稀疏加法,从而得到输出的Variable。
:code:`ref` 是维度为 :code:`R` 的张量。 :code:`index` 是维度为 :code:`K` 的张量。因此, :code:`index` 的形状是 :math:`[i_0, i_1, ..., i_{K-2}, Q]` ,其中 :math:`Q \leq R` 。:code:`updates` 是一个维度为 :math:`K - 1 + R - Q` 的张量,它的形状是 :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` 。
diff --git a/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst b/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst
index b0e660b03319f087d7995c9699d117a578c1555e..60a0b84a5d774638cf99589cdc78fb42a0cd7d22 100644
--- a/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/scatter_nd_cn.rst
@@ -5,6 +5,12 @@ scatter_nd
.. py:function:: paddle.fluid.layers.scatter_nd(index, updates, shape, name=None)
+:alias_main: paddle.scatter_nd
+:alias: paddle.scatter_nd,paddle.tensor.scatter_nd,paddle.tensor.manipulation.scatter_nd
+:old_api: paddle.fluid.layers.scatter_nd
+
+
+
该OP根据 :code:`index` ,将 :code:`updates` 添加到一个新的张量中,从而得到输出的Variable。这个操作与 :code:`scatter_nd_add` 类似,除了形状为 :code:`shape` 的张量是通过零初始化的。相应地, :code:`scatter_nd(index, updates, shape)` 等价于 :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)` 。如果 :code:`index` 有重复元素,则将累积相应的更新,因此,由于数值近似问题,索引中重复元素的顺序不同可能会导致不同的输出结果。具体的计算方法可以参见 :code:`scatter_nd_add` 。该OP是 :code:`gather_nd` 的反函数。
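
A minimal NumPy sketch of the equivalence stated above: ``scatter_nd`` behaves like ``scatter_nd_add`` applied to a zero tensor, with duplicated index rows accumulated. The helper name ``scatter_nd_ref`` and the explicit loop are ours and only illustrative:

.. code-block:: python

    import numpy as np

    def scatter_nd_ref(index, updates, shape):
        # Start from zeros and accumulate `updates` at the slices addressed
        # by `index`; repeated index rows are summed, as described above.
        out = np.zeros(shape, dtype=updates.dtype)
        flat_index = index.reshape(-1, index.shape[-1])
        flat_updates = updates.reshape((len(flat_index),) + out.shape[index.shape[-1]:])
        for idx, upd in zip(flat_index, flat_updates):
            out[tuple(idx)] += upd
        return out

    index = np.array([[1, 1], [0, 1], [1, 1]])   # the last row repeats the first
    updates = np.array([9., 10., 10.])
    print(scatter_nd_ref(index, updates, shape=(3, 5)))
    # [[ 0. 10.  0.  0.  0.]
    #  [ 0. 19.  0.  0.  0.]
    #  [ 0.  0.  0.  0.  0.]]
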
参数:
diff --git a/doc/fluid/api_cn/layers_cn/selu_cn.rst b/doc/fluid/api_cn/layers_cn/selu_cn.rst
index e76935f82b4a2f7edf075a913b279fb6da906cc2..4aee591bcc8dd87d0df0e9f0ef8faed224d5c5f1 100644
--- a/doc/fluid/api_cn/layers_cn/selu_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/selu_cn.rst
@@ -5,6 +5,12 @@ selu
.. py:function:: paddle.fluid.layers.selu(x, scale=None, alpha=None, name=None)
+:alias_main: paddle.nn.functional.selu
+:alias: paddle.nn.functional.selu,paddle.nn.functional.activation.selu
+:old_api: paddle.fluid.layers.selu
+
+
+
SeLU激活函数,其公式如下:
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst
index cac6a04da17c4639ceaddc7c7d0bf0086895a6ad..8bb16a4524400184880e1ee3cd36e570d048901d 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_concat_cn.rst
@@ -3,10 +3,13 @@
sequence_concat
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_concat(input, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用concat函数(fluid.layers.** :ref:`cn_api_fluid_layers_concat` **)。**
**该OP仅支持LoDTensor** ,通过LoDTensor的LoD信息将输入的多个LoDTensor进行连接(concat),输出连接后的LoDTensor。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst
index 1730822f148df0226e1c718f89e5be8726e5429c..9bf232beca7000c5a42c552814aa5ab70179a559 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_conv_cn.rst
@@ -3,10 +3,13 @@
sequence_conv
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=True, padding_start=None, bias_attr=None, param_attr=None, act=None, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用conv2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_conv2d` **)。**
该OP在给定的卷积参数下(如卷积核数目、卷积核大小等),对输入的变长序列(sequence)LoDTensor进行卷积操作。默认情况下,该OP会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的序列输出长度和输入长度一致。支持通过配置 ``padding_start`` 参数来指定序列填充的行为。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst
index 716220f75f202b18151ce1b8916486c0d1ad4b74..5ac0470fa3a199296d246361c7e4c5528a7e37bc 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_enumerate_cn.rst
@@ -3,10 +3,13 @@
sequence_enumerate
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_enumerate(input, win_size, pad_value=0, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
枚举形状为 ``[d_1, 1]`` 的输入序列所有长度为 ``win_size`` 的子序列,生成一个形状为 ``[d_1, win_size]`` 的新序列,需要时以 ``pad_value`` 填充。
注意,该OP的输入 ``input`` 只能是LoDTensor。
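
A small NumPy sketch of the enumeration described above for a single sequence (``enumerate_windows`` is a hypothetical helper, not the fluid API):

.. code-block:: python

    import numpy as np

    def enumerate_windows(seq, win_size, pad_value=0):
        # Every position yields its length-`win_size` window; the tail is
        # padded with `pad_value`, so the output has shape [len(seq), win_size].
        padded = list(seq) + [pad_value] * (win_size - 1)
        return np.array([padded[i:i + win_size] for i in range(len(seq))])

    print(enumerate_windows([1, 2, 3, 4], win_size=2))
    # [[1 2]
    #  [2 3]
    #  [3 4]
    #  [4 0]]
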
diff --git a/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst
index a68eee9f3435179983775431c759d0038bd75bac..c76d64fe47934ec7a52a125b07a14ce2e1fd1442 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_expand_as_cn.rst
@@ -3,10 +3,13 @@
sequence_expand_as
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_expand_as(x, y, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` 进行扩展。当前实现要求 ``y`` 的lod层数(level)必须为1,且 ``x`` 的第一维必须和 ``y`` 的第0层lod大小相同,所以扩展后的LoDTensor具有和 ``y`` 相同的lod。扩展结果与输入 ``x`` 的lod无关,所以无需考虑 ``x`` 的lod。
注意,该OP的输入 ``x`` 可以是Tensor或LoDTensor, ``y`` 只能是LoDTensor。
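
A hedged NumPy sketch of the level-0 expansion described above, with ``x`` given as a dense array and ``y`` represented only by its level-0 sequence lengths (the helper name and argument ``y_seq_lens`` are ours):

.. code-block:: python

    import numpy as np

    def sequence_expand_as_ref(x, y_seq_lens):
        # Row i of `x` is repeated y_seq_lens[i] times so the result lines up
        # with y's level-0 lod; len(x) must equal the number of sequences in y.
        x = np.asarray(x)
        assert len(x) == len(y_seq_lens)
        return np.repeat(x, y_seq_lens, axis=0)

    x = np.array([[1.], [2.], [3.], [4.]])
    print(sequence_expand_as_ref(x, y_seq_lens=[3, 3, 1, 1]).ravel())
    # [1. 1. 1. 2. 2. 2. 3. 4.]
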
diff --git a/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst
index 414719d8d2de85a688326dee7a90ac573fe673c6..81bbbe60230a1c14f09e8c066ba957bac8c054b9 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_expand_cn.rst
@@ -3,10 +3,13 @@
sequence_expand
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_expand(x, y, ref_level=-1, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
序列扩张层(Sequence Expand Layer),根据输入 ``y`` 的第 ``ref_level`` 层lod对输入 ``x`` 进行扩展。 ``x`` 的lod level最多为1,若 ``x`` 的lod level为1,则 ``x`` 的lod大小必须与 ``y`` 的第 ``ref_level`` 层lod大小相等;若 ``x`` 的lod level为0,则 ``x`` 的第一维大小必须与 ``y`` 第 ``ref_level`` 层大小相等。 ``x`` 的秩最少为2,当 ``x`` 的秩大于2时,将被当作是一个二维张量处理。
注意,该OP的输入 ``x`` 可以是Tensor或LoDTensor, ``y`` 只能是LoDTensor。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst
index 5eabcb568b23c2195f7e9113ed0eb6b133febfb9..bdc2afbba3f1ffe770b9dfc5364b248b6d44d6da 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_first_step_cn.rst
@@ -3,10 +3,13 @@
sequence_first_step
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_first_step(input)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的第一个时间步(time_step)的特征向量作为池化后的输出向量。
::
diff --git a/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst
index 2e0fcffa6435122250fc226caf9ea26699d4ad36..c2f4604c51ef494f4e374ba0efb88d73a1e1b778 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_last_step_cn.rst
@@ -3,10 +3,13 @@
sequence_last_step
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_last_step(input)
+:api_attr: 声明式编程模式(静态图)
+
+
+
该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的最后一个时间步(time-step)的特征向量作为池化后的输出向量。
::
diff --git a/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst
index e04c21593d0f129ae21908ee5f848e7949bf909b..ac7fafb2faa1728201dc7452da644c8449a6f728 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_mask_cn.rst
@@ -5,6 +5,9 @@ sequence_mask
.. py:function:: paddle.fluid.layers.sequence_mask(x, maxlen=None, dtype='int64', name=None)
+
+
+
该层根据输入 ``x`` 和 ``maxlen`` 输出一个掩码,数据类型为 ``dtype`` 。
假设 x 是一个形状为 ``[d_1, d_2,…, d_n]`` 的张量, 则输出 y 是一个形状为 ``[d_1, d_2,… ,d_n, maxlen]`` 的掩码,其中:
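
In other words, ``y[i_1, ..., i_n, j] = (j < x[i_1, ..., i_n])``. A minimal NumPy sketch of this rule (``sequence_mask_ref`` is a hypothetical helper, not the fluid kernel):

.. code-block:: python

    import numpy as np

    def sequence_mask_ref(x, maxlen=None, dtype='int64'):
        # y[..., j] = 1 if j < x[...], else 0
        x = np.asarray(x)
        if maxlen is None:
            maxlen = int(x.max())
        j = np.arange(maxlen)
        return (j < x[..., None]).astype(dtype)

    print(sequence_mask_ref([1, 3, 2], maxlen=4))
    # [[1 0 0 0]
    #  [1 1 1 0]
    #  [1 1 0 0]]
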
diff --git a/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst
index 5f509c960ecc5e5930fe77b7f5e5f1cc3be09801..12ddf6eb2ecd59afe5700187479a636d33cd7fe7 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_pad_cn.rst
@@ -3,10 +3,13 @@
sequence_pad
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_pad(x,pad_value,maxlen=None,name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
序列填充操作符(Sequence Pad Operator),该OP将同一batch中的序列填充到一个一致的长度(由 ``maxlen`` 指定)。填充的新元素的值具体由输入 ``pad_value`` 指定,并会添加到每一个序列的末尾,使得它们最终的长度保持一致。最后返回一个Python tuple ``(Out, Length)`` ,其中LoDTensor ``Out`` 为填充后的序列,LoDTensor ``Length`` 为填充前的原序列长度信息。
注意,该OP的输入 ``x`` 只能是LoDTensor。
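
A minimal NumPy sketch of the padding behaviour described above, returning the padded data together with the original lengths (``sequence_pad_ref`` is a hypothetical helper that works on plain Python lists rather than LoDTensor):

.. code-block:: python

    import numpy as np

    def sequence_pad_ref(seqs, pad_value, maxlen=None):
        # Pad each sequence at its end with `pad_value` up to a common length,
        # and also return the original lengths.
        lengths = np.array([len(s) for s in seqs], dtype='int64')
        if maxlen is None:
            maxlen = int(lengths.max())
        out = np.full((len(seqs), maxlen), pad_value, dtype='float32')
        for i, s in enumerate(seqs):
            out[i, :len(s)] = s
        return out, lengths

    out, length = sequence_pad_ref([[1., 2.], [3., 4., 5.]], pad_value=0.0)
    print(out)     # [[1. 2. 0.]
                   #  [3. 4. 5.]]
    print(length)  # [2 3]
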
diff --git a/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst
index e5a1237838851a6e3c0688e9e02d934ab00361ca..480bc75c92f9d5412861391bbcb5ce94361a1701 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_pool_cn.rst
@@ -3,10 +3,13 @@
sequence_pool
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_pool(input, pool_type, is_test=False, pad_value=0.0)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用pool2d函数(fluid.layers.** :ref:`cn_api_fluid_layers_pool2d` **)。**
该OP **仅支持LoDTensor类型的输入** ,将对输入的LoDTensor进行指定方式的池化(pooling)操作。通过指定pool_type参数,将输入的每个序列(sequence)在最后一层lod_level上或时间步(time-step)上对特征进行诸如sum、average、sqrt等池化操作。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst
index 34f80bef63c67113261560df9c8f7e780cd684c9..78385efd975cc5c88768fe787c552b782688e6cb 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_reshape_cn.rst
@@ -3,10 +3,13 @@
sequence_reshape
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_reshape(input, new_dim)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reshape函数(fluid.layers.** :ref:`cn_api_fluid_layers_reshape` **)。**
**该OP仅支持LoDTensor** ,在指定 ``new_dim`` 参数下,通过序列原始长度和原始shape计算出新的shape,以输出包含新维度(new_dim)的LoDTensor。目前仅支持1-level LoDTensor,请确保每个序列的(原长度*原维数)能被新的维数整除,不产生余数。
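
For a single sequence the shape arithmetic described above reduces to a plain reshape; a hedged sketch (the helper name is ours):

.. code-block:: python

    import numpy as np

    def sequence_reshape_ref(seq, new_dim):
        # (old_len * old_dim) must be divisible by new_dim for every sequence.
        seq = np.asarray(seq)
        old_len, old_dim = seq.shape
        assert (old_len * old_dim) % new_dim == 0, "sequence cannot be reshaped evenly"
        return seq.reshape(-1, new_dim)

    x = np.arange(12, dtype='float32').reshape(4, 3)   # one sequence: length 4, dim 3
    print(sequence_reshape_ref(x, new_dim=6).shape)    # (2, 6): new length 2, dim 6
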
diff --git a/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst
index 675ef36d2ae30e27303b2ee67f34a7685cf85d5f..2bad60302dc081653322dee0bbc23a6c20cc84e9 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_reverse_cn.rst
@@ -5,6 +5,9 @@ sequence_reverse
.. py:function:: paddle.fluid.layers.sequence_reverse(x, name=None)
+
+
+
**注意:该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用reverse函数(fluid.layers.** :ref:`cn_api_fluid_layers_reverse` **)。**
**该OP仅支持LoDTensor** ,对于输入的LoDTensor,在每个序列(sequence)上进行反转。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。该OP在构建反向 :ref:`cn_api_fluid_layers_DynamicRNN` 网络时十分有用。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst
index 086a9a41fa6697bca3eebbe486b3a14a422e9edb..aaae0735e5ac837e744f7c87a3c7447a894e8e4c 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_scatter_cn.rst
@@ -3,10 +3,13 @@
sequence_scatter
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_scatter(input, index, updates, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
.. note::
该OP的输入index,updates必须是LoDTensor。
@@ -45,7 +48,7 @@ output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数
参数:
- **input** (Variable) - 维度为 :math:`[N, k_1 ... k_n]` 的Tensor, 支持的数据类型:float32,float64,int32,int64。
- - **index** (Variable) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int64。
+ - **index** (Variable) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int32,int64。
- **updates** (Variable) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。
- **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst
index 4d67aea9acd1c49738242040d25e3a6b69bb3529..4281741274a644ed90949de219105be69e5729b8 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_slice_cn.rst
@@ -3,10 +3,13 @@
sequence_slice
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_slice(input, offset, length, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
**实现Sequence Slice(序列切片)运算**
**该OP输入只能是LoDTensor,如果您需要处理的是Tensor类型,请使用 :ref:`cn_api_fluid_layers_slice` 。**
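
A hedged sketch of the per-sequence slicing on plain Python lists (the fluid OP works on LoDTensor; ``sequence_slice_ref`` and the example values are illustrative only):

.. code-block:: python

    def sequence_slice_ref(seqs, offset, length):
        # For each sequence i, keep `length[i]` steps starting at `offset[i]`.
        return [seq[o:o + l] for seq, o, l in zip(seqs, offset, length)]

    seqs = [['a1', 'a2'], ['b1', 'b2']]
    print(sequence_slice_ref(seqs, offset=[0, 1], length=[2, 1]))
    # [['a1', 'a2'], ['b2']]
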
@@ -36,7 +39,7 @@ sequence_slice
``offset`` 从0开始。
参数:
- - **input** (Variable) – 输入变量,类型为LoDTensor,承载着完整的序列
+ - **input** (Variable) – 输入变量,类型为LoDTensor,承载着完整的序列。数据类型为float32,float64,int32或int64。
- **offset** (Variable) – 指定每个序列切片的起始索引,数据类型为int32或int64。
- **length** (Variable) – 指定每个子序列的长度,数据类型为int32或int64。
- **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。
diff --git a/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst
index 0638ffb8a2512ce594d23c3805f81c0c9930bdb5..e59c1fbc8d1925860cbe2f38eba343244f20ce68 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_softmax_cn.rst
@@ -3,10 +3,13 @@
sequence_softmax
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_softmax(input, use_cudnn=False, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
.. note::
该OP的输入只能是LoDTensor,如果要处理的输入是Tensor类型,请使用 :ref:`cn_api_fluid_layers_softmax`
diff --git a/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst b/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst
index 0b00795fa1a81e3f47f6cc78ca8a8d7041a856c2..8a1587c733d3341dcc21009a72c700bfc9fe2fe3 100644
--- a/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sequence_unpad_cn.rst
@@ -3,10 +3,13 @@
sequence_unpad
-------------------------------
-**注意:该API仅支持【静态图】模式**
.. py:function:: paddle.fluid.layers.sequence_unpad(x, length, name=None)
+:api_attr: 声明式编程模式(静态图)
+
+
+
.. note::
该OP的输入为Tensor,输出为LoDTensor。该OP用于移除填充元素,与之对应,还存在进行数据填充的OP sequence_pad,详情见: :ref:`cn_api_fluid_layers_sequence_pad`
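
As the inverse of the padding sketch shown for sequence_pad, a minimal NumPy sketch of removing padding given the original lengths (the helper name is ours):

.. code-block:: python

    import numpy as np

    def sequence_unpad_ref(x, length):
        # Drop the padded tail of each row, keeping only the first `length[i]` steps.
        x = np.asarray(x)
        return [row[:l].tolist() for row, l in zip(x, length)]

    x = np.array([[1., 2., 0.], [3., 4., 5.]])
    print(sequence_unpad_ref(x, length=[2, 3]))
    # [[1.0, 2.0], [3.0, 4.0, 5.0]]
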
diff --git a/doc/fluid/api_cn/layers_cn/shape_cn.rst b/doc/fluid/api_cn/layers_cn/shape_cn.rst
index 44df86e1d2f7db338f038ce25deb37f8856b7b94..0672af0a3a203fd1d2e866705c2b52ee21421fe9 100644
--- a/doc/fluid/api_cn/layers_cn/shape_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/shape_cn.rst
@@ -5,14 +5,38 @@ shape
.. py:function:: paddle.fluid.layers.shape(input)
+:alias_main: paddle.shape
+:alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
+:old_api: paddle.fluid.layers.shape
+
+
+
shape层。
-获得输入Tensor的shape。
+获得输入Tensor或SelectedRows的shape。
+
+::
+
+ 示例1:
+ 输入是 N-D Tensor类型:
+ input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
+
+ 输出shape:
+ input.shape = [2, 4]
+
+ 示例2:
+ 输入是 SelectedRows类型:
+ input.rows = [0, 4, 19]
+ input.height = 20
+ input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor
+ 输出shape:
+ input.shape = [3, 2]
参数:
- - **input** (Variable)- 输入的多维Tensor,数据类型为float32,float64,int32,int64。
+ - **input** (Variable)- 输入的多维Tensor或SelectedRows,数据类型为float16,float32,float64,int32,int64。如果输入是SelectedRows类型,则返回其内部持有Tensor的shape。
+
-返回: 一个Tensor,表示输入Tensor的shape。
+返回: 一个Tensor,表示输入Tensor或SelectedRows的shape。
返回类型: Variable(Tensor)。
@@ -23,7 +47,7 @@ shape层。
import paddle.fluid as fluid
import numpy as np
- inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32")
+ inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
output = fluid.layers.shape(inputs)
exe = fluid.Executor(fluid.CPUPlace())
diff --git a/doc/fluid/api_cn/layers_cn/shard_index_cn.rst b/doc/fluid/api_cn/layers_cn/shard_index_cn.rst
index 4f454e662fe74e6ae41a1fc7ae068183097df6dc..2bf72254ca86f7559f30f3dcec891248a0a64e6b 100644
--- a/doc/fluid/api_cn/layers_cn/shard_index_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/shard_index_cn.rst
@@ -5,6 +5,12 @@ shard_index
.. py:function:: paddle.fluid.layers.shard_index(input, index_num, nshards, shard_id, ignore_value=-1)
+:alias_main: paddle.shard_index
+:alias: paddle.shard_index,paddle.tensor.shard_index,paddle.tensor.manipulation.shard_index
+:old_api: paddle.fluid.layers.shard_index
+
+
+
该函数对输入的索引根据分片(shard)的偏移量重新计算。
索引长度被均分为N个分片,如果输入索引所在的分片跟分片ID对应,则该索引以分片的偏移量为界重新计算,否则更新为默认值(ignore_value)。具体计算为:
::
diff --git a/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst b/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst
index a91ce5ea552a528deca87dcd4b6f8755c32712c3..fffbae0a48f36c1cfa80a37ea91fdddf139d75b9 100644
--- a/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/shuffle_channel_cn.rst
@@ -5,6 +5,12 @@ shuffle_channel
.. py:function:: paddle.fluid.layers.shuffle_channel(x, group, name=None)
+:alias_main: paddle.nn.functional.shuffle_channel
+:alias: paddle.nn.functional.shuffle_channel,paddle.nn.functional.vision.shuffle_channel
+:old_api: paddle.fluid.layers.shuffle_channel
+
+
+
该OP将输入 ``x`` 的通道混洗重排:将输入通道分成 ``group`` 个子组,并通过逐一从每个子组中选取元素来获得新的通道顺序。
请参阅 https://arxiv.org/pdf/1707.01083.pdf
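
The reordering described above can be sketched in NumPy as a reshape/transpose/reshape round trip, which is the usual way channel shuffle is expressed (the helper name and example values are ours):

.. code-block:: python

    import numpy as np

    def shuffle_channel_ref(x, group):
        # x: [N, C, H, W]; split C into `group` sub-groups, then interleave them.
        n, c, h, w = x.shape
        assert c % group == 0
        return x.reshape(n, group, c // group, h, w) \
                .transpose(0, 2, 1, 3, 4) \
                .reshape(n, c, h, w)

    x = np.arange(4).reshape(1, 4, 1, 1)                 # channel values [0, 1, 2, 3]
    print(shuffle_channel_ref(x, group=2).reshape(-1))   # [0 2 1 3]
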
diff --git a/doc/fluid/api_cn/layers_cn/shuffle_cn.rst b/doc/fluid/api_cn/layers_cn/shuffle_cn.rst
deleted file mode 100644
index 3be4313d48586820adba07b298dabdac23fc86be..0000000000000000000000000000000000000000
--- a/doc/fluid/api_cn/layers_cn/shuffle_cn.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-.. _cn_api_fluid_layers_shuffle:
-
-shuffle
--------------------------------
-
-.. py:function:: paddle.fluid.layers.shuffle(reader, buffer_size)
-
-创建一个特殊的数据读取器,它的输出数据会被重洗(shuffle)。由原始读取器创建的迭代器得到的输出将会被暂存到shuffle缓存区,其后
-会对其进行重洗运算。shuffle缓存区的大小由参数 ``buffer_size`` 决定。
-
-参数:
- - **reader** (callable) – 输出会被shuffle的原始reader
- - **buffer_size** (int) – 进行shuffle的buffer的大小
-
-返回:其输出会被shuffle的一个reader(读取器)
-
-返回类型:callable
-
-**代码示例**:
-
-.. code-block:: python
-
- import paddle.fluid as fluid
- raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
- './data2.recordio'],
- shapes=[(3,224,224), (1,)],
- lod_levels=[0, 0],
- dtypes=['float32', 'int64'],
- thread_num=2,
- buffer_size=2)
- batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
- shuffle_reader = fluid.layers.shuffle(reader=batch_reader, buffer_size=5000)
-
-
-
-
-
-
-
-
diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst
index f510edc7e1eecf4f4e72803124a38ce2e1b2c0d7..fb5ccd21da695a2f3c840663797b66a5ccaff54c 100755
--- a/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sigmoid_cn.rst
@@ -5,6 +5,9 @@ sigmoid
.. py:function:: paddle.fluid.layers.sigmoid(x, name=None)
+
+
+
sigmoid激活函数
.. math::
diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst
index bbe525f558f899f632359d93af0a82cdd26d2467..18506b0f583475bad23de7f9b3c1e805ca9ada16 100644
--- a/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sigmoid_cross_entropy_with_logits_cn.rst
@@ -5,6 +5,12 @@ sigmoid_cross_entropy_with_logits
.. py:function:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits(x, label, ignore_index=-100, name=None, normalize=False)
+:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
+:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
+:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
+
+
+
在各类别相互独立的分类任务中,该OP逐元素计算概率误差。可以将其视为预测数据点的标签,这些标签之间并不互斥。例如,一篇新闻文章可以同时属于政治、科技、体育等多个类别,也可以不属于其中任何一个。
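
For reference, a hedged NumPy sketch of the element-wise logistic loss on logits in its numerically stable form (handling of ``ignore_index`` and ``normalize`` is omitted; the helper name is ours):

.. code-block:: python

    import numpy as np

    def sigmoid_ce_with_logits_ref(x, label):
        # Stable form of -label*log(sigmoid(x)) - (1-label)*log(1-sigmoid(x)).
        x = np.asarray(x, dtype='float64')
        label = np.asarray(label, dtype='float64')
        return np.maximum(x, 0) - x * label + np.log1p(np.exp(-np.abs(x)))

    x = np.array([-1.0, 0.0, 2.0])
    label = np.array([0.0, 1.0, 1.0])
    print(sigmoid_ce_with_logits_ref(x, label))
    # matches -label*log(sigmoid(x)) - (1-label)*log(1-sigmoid(x)) element-wise
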
logistic loss可通过下式计算:
diff --git a/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst b/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst
index b5e33e3b7385808093a1359c9e36b6ed8453c65e..2f47561a89a4560e0c39553205b4d7fa68c7a841 100644
--- a/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst
+++ b/doc/fluid/api_cn/layers_cn/sigmoid_focal_loss_cn.rst
@@ -3,7 +3,13 @@
sigmoid_focal_loss
-------------------------------
-.. py:function:: paddle.fluid.layers.sigmoid_focal_loss(x, label, fg_num, gamma=2, alpha=0.25)
+.. py:function:: paddle.fluid.layers.sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25)
+
+:alias_main: paddle.nn.functional.sigmoid_focal_loss
+:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
+:old_api: paddle.fluid.layers.sigmoid_focal_loss
+
+
`Focal Loss