diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake
index b520c03a836a9e3f263ba050f151877ffe0d071d..03c73786a6c31868b1893bfcb319e43e37db1a3d 100644
--- a/cmake/cuda.cmake
+++ b/cmake/cuda.cmake
@@ -169,14 +169,19 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
 # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
 # So, don't set these flags here.
+if (NOT WIN32) # Windows MSVC 2015 supports C++11 natively.
+# -std=c++11 and -fPIC are not recognized by MSVC; -Xcompiler will be added by cmake.
 list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
-list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
 list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
+endif(NOT WIN32)
+
+list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
 # in cuda9, suppress cuda warning on eigen
 list(APPEND CUDA_NVCC_FLAGS "-w")
 # Set :expt-relaxed-constexpr to suppress Eigen warnings
 list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
+if (NOT WIN32)
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
   list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
 elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
@@ -187,6 +192,13 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
   # nvcc 9 does not support -Os. Use Release flags instead
   list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
 endif()
+else(NOT WIN32)
+if(CMAKE_BUILD_TYPE STREQUAL "Release")
+  list(APPEND CUDA_NVCC_FLAGS "-O3 -DNDEBUG")
+else()
+  message(FATAL_ERROR "Windows only supports Release builds now. Please set the Visual Studio build type to Release, x64 build.")
+endif()
+endif(NOT WIN32)
 
 mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
 mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst b/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst
index b782242a6632a5d42a512cf3b830d6e047c064ab..e4682ccb94e6fc60e184632dff9ee16a6bf16ec0 100644
--- a/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst
+++ b/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst
@@ -1,5 +1,5 @@
-服务器端部署 - Anakin
-#####################
+Anakin - 服务器端加速引擎
+#######################
 
 使用文档
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst b/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst
deleted file mode 100644
index a5209e8560b31e9f0f776fba9a2b8c5bc150165c..0000000000000000000000000000000000000000
--- a/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-服务器端部署 - 原生引擎
-#######################
-
-.. toctree::
-   :maxdepth: 2
-
-   build_and_install_lib_cn.rst
-   native_infer.rst
diff --git a/doc/fluid/new_docs/advanced_usage/index.rst b/doc/fluid/new_docs/advanced_usage/index.rst
index dea7c236619a0bdbf402f371571d947d1cdbba65..89166573eebca045e948046c69f3b7a3e0031d58 100644
--- a/doc/fluid/new_docs/advanced_usage/index.rst
+++ b/doc/fluid/new_docs/advanced_usage/index.rst
@@ -10,7 +10,6 @@
 .. toctree::
    :maxdepth: 2
 
-   deploy/index_native.rst
    deploy/index_anakin.rst
    deploy/index_mobile.rst
    development/contribute_to_paddle.md
diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..dc7c62b06287ad333dd41082e566b0553d3a5341
--- /dev/null
+++ b/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore
@@ -0,0 +1,8 @@
+*.pyc
+train.log
+output
+data/cifar-10-batches-py/
+data/cifar-10-python.tar.gz
+data/*.txt
+data/*.list
+data/mean.meta
diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/image_classification/README.cn.md
index 8d645718e12e4d976a8e71de105e11f495191fbf..4f20843596aa676962a36241f59560ec2a41257b 100644
--- a/doc/fluid/new_docs/beginners_guide/basics/image_classification/README.cn.md
+++ b/doc/fluid/new_docs/beginners_guide/basics/image_classification/README.cn.md
@@ -21,7 +21,7 @@
 图像分类包括通用图像分类、细粒度图像分类等。图1展示了通用图像分类效果,即模型可以正确识别图像上的主要物体。

-
+
图1. 通用图像分类展示

@@ -30,7 +30,7 @@

-
+
图2. 细粒度图像分类展示

@@ -38,7 +38,7 @@ 一个好的模型既要对不同类别识别正确,同时也应该能够对不同视角、光照、背景、变形或部分遮挡的图像正确识别(这里我们统一称作图像扰动)。图3展示了一些图像的扰动,较好的模型会像聪明的人类一样能够正确识别。

-
+
图3. 扰动图片展示[22]

@@ -61,7 +61,7 @@ Alex Krizhevsky在2012年ILSVRC提出的CNN模型 \[[9](#参考文献)\] 取得了历史性的突破,效果大幅度超越传统方法,获得了ILSVRC2012冠军,该模型被称作AlexNet。这也是首次将深度学习用于大规模图像分类中。从AlexNet之后,涌现了一系列CNN模型,不断地在ImageNet上刷新成绩,如图4展示。随着模型变得越来越深以及精妙的结构设计,Top-5的错误率也越来越低,降到了3.5%附近。而在同样的ImageNet数据集上,人眼的辨识错误率大概在5.1%,也就是目前的深度学习模型的识别能力已经超过了人眼。

-
+
图4. ILSVRC图像分类Top-5错误率

@@ -70,7 +70,7 @@ Alex Krizhevsky在2012年ILSVRC提出的CNN模型 \[[9](#参考文献)\] 取得 传统CNN包含卷积层、全连接层等组件,并采用softmax多类别分类器和多类交叉熵损失函数,一个典型的卷积神经网络如图5所示,我们先介绍用来构造CNN的常见组件。

-
+
图5. CNN网络示例[20]

@@ -89,7 +89,7 @@ Alex Krizhevsky在2012年ILSVRC提出的CNN模型 \[[9](#参考文献)\] 取得 牛津大学VGG(Visual Geometry Group)组在2014年ILSVRC提出的模型被称作VGG模型 \[[11](#参考文献)\] 。该模型相比以往模型进一步加宽和加深了网络结构,它的核心是五组卷积操作,每两组之间做Max-Pooling空间降维。同一组内采用多次连续的3X3卷积,卷积核的数目由较浅组的64增多到最深组的512,同一组内的卷积核数目是一样的。卷积之后接两层全连接层,之后是分类层。由于每组内卷积层的不同,有11、13、16、19层这几种模型,下图展示一个16层的网络结构。VGG模型结构相对简洁,提出之后也有很多文章基于此模型进行研究,如在ImageNet上首次公开超过人眼识别的模型\[[19](#参考文献)\]就是借鉴VGG模型的结构。

-
+
图6. 基于ImageNet的VGG16模型

@@ -106,7 +106,7 @@ NIN模型主要有两个特点: Inception模块如下图7所示,图(a)是最简单的设计,输出是3个卷积层和一个池化层的特征拼接。这种设计的缺点是池化层不会改变特征通道数,拼接后会导致特征的通道数较大,经过几层这样的模块堆积后,通道数会越来越大,导致参数和计算量也随之增大。为了改善这个缺点,图(b)引入3个1x1卷积层进行降维,所谓的降维就是减少通道数,同时如NIN模型中提到的1x1卷积也可以修正线性特征。

-
+
图7. Inception模块

@@ -115,7 +115,7 @@ GoogleNet由多组Inception模块堆积而成。另外,在网络最后也没 GoogleNet整体网络结构如图8所示,总共22层网络:开始由3层普通的卷积组成;接下来由三组子网络组成,第一组子网络包含2个Inception模块,第二组包含5个Inception模块,第三组包含2个Inception模块;然后接均值池化层、全连接层。

-
+
图8. GoogleNet[12]

@@ -130,14 +130,14 @@ ResNet(Residual Network) \[[15](#参考文献)\] 是2015年ImageNet图像分类 残差模块如图9所示,左边是基本模块连接方式,由两个输出通道数相同的3x3卷积组成。右边是瓶颈模块(Bottleneck)连接方式,之所以称为瓶颈,是因为上面的1x1卷积用来降维(图示例即256->64),下面的1x1卷积用来升维(图示例即64->256),这样中间3x3卷积的输入和输出通道数都较小(图示例即64->64)。

-
+
图9. 残差模块

图10展示了50、101、152层网络连接示意图,使用的是瓶颈模块。这三个模型的区别在于每组中残差模块的重复次数不同(见图右上角)。ResNet训练收敛较快,成功的训练了上百乃至近千层的卷积神经网络。

-
+
图10. 基于ImageNet的ResNet模型

@@ -149,7 +149,7 @@ ResNet(Residual Network) \[[15](#参考文献)\] 是2015年ImageNet图像分类 由于ImageNet数据集较大,下载和训练较慢,为了方便大家学习,我们使用[CIFAR10]()数据集。CIFAR10数据集包含60,000张32x32的彩色图片,10个类别,每个类包含6,000张。其中50,000张图片作为训练集,10000张作为测试集。图11从每个类别中随机抽取了10张图片,展示了所有的类别。

-
+
图11. CIFAR10数据集[21]

@@ -377,7 +377,7 @@ test_reader = paddle.batch( `event_handler_plot`可以用来利用回调数据来打点画图:

-
+
图12. 训练结果

@@ -469,7 +469,7 @@ Test with Pass 0, Loss 1.1, Acc 0.6 图13是训练的分类错误率曲线图,运行到第200个pass后基本收敛,最终得到测试集上分类错误率为8.54%。

-
+
图13. CIFAR10数据集上VGG模型的分类错误率

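The VGG configuration this README describes, five groups of stacked 3x3 convolutions with each group followed by max-pooling and filter counts growing from 64 to 512, can be sketched with the `paddle.fluid` API the tutorial uses. This is a hedged illustration only: `fluid.nets.img_conv_group`, the group sizes, and the dropout rates follow the PaddlePaddle book's CIFAR10 VGG example and are not taken from the files this diff touches.

```python
import paddle.fluid as fluid

def vgg_bn_drop(input):
    # One VGG group: `groups` consecutive 3x3 convolutions (ReLU + batch
    # norm + dropout) followed by a 2x2 max-pooling, as in the text above.
    def conv_block(ipt, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=ipt,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    # Five groups; channel counts grow 64 -> 128 -> 256 -> 512 -> 512.
    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    # Two fully connected layers and a 10-way softmax for CIFAR10.
    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    predict = fluid.layers.fc(input=fc1, size=10, act='softmax')
    return predict
```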
diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/cifar.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/cifar.png deleted file mode 100644 index f3c5f2f7b0c84f83382b70124dcd439586ed4eb0..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/cifar.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png deleted file mode 100644 index ca8f858a902ea723d886d2b88c2c0a1005301c50..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png deleted file mode 100644 index 38b21f21604b1bb84fc3f6aa96bd5fce45d15a55..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png deleted file mode 100644 index 647c822e52cd55d50e5f207978f5e6ada86cf34c..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png deleted file mode 100644 index 04245cef60fe7126ae4c92ba8085273965078bee..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg deleted file mode 100644 index 249dbf96df61c3352ea5bd80470f6c4a1e03ff10..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png deleted file mode 100644 index 4660ac122e9d533023a21154d35eee29e3b08d27..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png deleted file mode 100644 index 9591a0c1e8c0165c40ca560be35a7b9a91cd5027..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception_en.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception_en.png deleted file mode 100644 index 
39580c20b583f2a15d17fd124a572c84e6e2db1d..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png deleted file mode 100644 index 77f785e03bacd38c4c64a817874a58ff3298d2f3..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet_en.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet_en.png deleted file mode 100644 index 97a1e3eee45c0db95e6a943ca3b8c0cf6c34d4b6..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png deleted file mode 100644 index 57e45cc0c27dd99b9918de2ff1228bc6b65f7424..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot_en.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot_en.png deleted file mode 100644 index 147e575bf49086811c43420d5a9c8f749e2da405..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png deleted file mode 100644 index 0aeb4f254639fdbf18e916dc219ca61602596d85..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg deleted file mode 100644 index c500eb01a90190ff66150871fe83ec275e2de8d7..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png deleted file mode 100644 index c6336a9a69b95dc978719ce68896e3e752e67fed..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations.png deleted file mode 100644 index b4ebbbe6a50f5fd7cd0cccb52cdac5653e34654c..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations.png and /dev/null differ diff --git 
a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations_en.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations_en.png deleted file mode 100644 index 88c60fe87f802c5ce560bb15bbdbd229aeafc4e4..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/variations_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png deleted file mode 100644 index 6270eefcfd7071bc1643ee06567e5b81aaf4c177..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/index.rst b/doc/fluid/new_docs/beginners_guide/basics/index.rst index e1fd226116d88fbf137741242b304b367e598ba5..0fcb008e0a7773e81e5124da09fe07366130b924 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/index.rst +++ b/doc/fluid/new_docs/beginners_guide/basics/index.rst @@ -6,7 +6,7 @@ .. todo:: 概述 - + .. toctree:: :maxdepth: 2 diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..29b5622a53a1b0847e9f53febf1cc50dcf4f044a --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore @@ -0,0 +1,12 @@ +data/train.list +data/test.* +data/conll05st-release.tar.gz +data/conll05st-release +data/predicate_dict +data/label_dict +data/word_dict +data/emb +data/feature +output +predict.res +train.log diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/README.cn.md index 47e948bd1ffc0ca692dc9899193e94831ce4234b..0891f5b6b16a1b715b44db6c47ba079adfcad4c5 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/README.cn.md @@ -21,7 +21,7 @@ $$\mbox{[小明]}_{\mbox{Agent}}\mbox{[昨天]}_{\mbox{Time}}\mbox{[晚上]}_\mb 5. 对第4步的结果,通过多分类得到论元的语义角色标签。可以看到,句法分析是基础,并且后续步骤常常会构造的一些人工特征,这些特征往往也来自句法分析。
-
+
图1. 依存句法分析句法树示例
@@ -30,7 +30,7 @@ $$\mbox{[小明]}_{\mbox{Agent}}\mbox{[昨天]}_{\mbox{Time}}\mbox{[晚上]}_\mb 我们继续以上面的这句话为例,图1展示了BIO表示方法。
-
+
图2. BIO标注方法示例
@@ -53,7 +53,7 @@ $$\mbox{[小明]}_{\mbox{Agent}}\mbox{[昨天]}_{\mbox{Time}}\mbox{[晚上]}_\mb 图3是最终得到的栈式循环神经网络结构示意图。

-
+
图3. 基于LSTM的栈式循环神经网络结构示意图

@@ -64,7 +64,7 @@ $$\mbox{[小明]}_{\mbox{Agent}}\mbox{[昨天]}_{\mbox{Time}}\mbox{[晚上]}_\mb 为了克服这一缺陷,我们可以设计一种双向循环网络单元,它的思想简单且直接:对上一节的栈式循环神经网络进行一个小小的修改,堆叠多个LSTM单元,让每一层LSTM单元分别以:正向、反向、正向 …… 的顺序学习上一层的输出序列。于是,从第2层开始,$t$时刻我们的LSTM单元便总是可以看到历史和未来的信息。图4是基于LSTM的双向循环神经网络结构示意图。

-
+
图4. 基于LSTM的双向循环神经网络结构示意图

@@ -79,7 +79,7 @@ CRF是一种概率化结构模型,可以看作是一个概率无向图模型 序列标注任务只需要考虑输入和输出都是一个线性序列,并且由于我们只是将输入序列作为条件,不做任何条件独立假设,因此输入序列的元素之间并不存在图结构。综上,在序列标注任务中使用的是如图5所示的定义在链式图上的CRF,称之为线性链条件随机场(Linear Chain Conditional Random Field)。

-
+
图5. 序列标注任务中使用的线性链条件随机场

@@ -123,7 +123,7 @@ $$\DeclareMathOperator*{\argmax}{arg\,max} L(\lambda, D) = - \text{log}\left(\pr 4. CRF以第3步中LSTM学习到的特征为输入,以标记序列为监督信号,完成序列标注;
-
+
图6. SRL任务上的深层双向LSTM模型
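The BIO scheme shown in 图2 above marks each word with B-X (begins an argument of role X), I-X (inside that argument), or O (outside any argument). A minimal pure-Python decoder makes the convention concrete; it is illustrative only, and the tag names here are hypothetical rather than taken from the patched files.

```python
def bio_to_spans(tags):
    """Recover (start, end, role) argument spans from a BIO tag sequence."""
    spans, start, role = [], None, None
    for i, tag in enumerate(tags):
        if tag.startswith("B-"):  # a new argument begins here
            if role is not None:
                spans.append((start, i, role))
            start, role = i, tag[2:]
        elif tag.startswith("I-") and role == tag[2:]:
            continue              # still inside the current argument
        else:                     # "O" (or an inconsistent "I-") closes it
            if role is not None:
                spans.append((start, i, role))
            start, role = None, None
    if role is not None:
        spans.append((start, len(tags), role))
    return spans

print(bio_to_spans(["B-ARG0", "I-ARG0", "O", "B-V", "B-ARG1", "I-ARG1"]))
# -> [(0, 2, 'ARG0'), (3, 4, 'V'), (4, 6, 'ARG1')]
```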
diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png deleted file mode 100644 index e63f5ebd6d00f2e4ecf97b9ab2027e74683013f2..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png deleted file mode 100644 index f0a195c24d9ee493f96bb93c28a99e70566be7a4..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png deleted file mode 100644 index e5f7151c9fcc50a7cf7af485cbbc7e4fccab0c20..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png deleted file mode 100644 index 93b44dd4874402ef29ad7bd7d94147609b92e309..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png deleted file mode 100644 index 592f7ee23bdc88a9a35059612e5ab880bbc9d34b..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png deleted file mode 100644 index c3646312e48db977402fb353dc0c9b4d02269bf4..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png deleted file mode 100644 index 9265b671735940ed6549e2980064d2ce08baae64..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png deleted file mode 100644 index 23f4f45b603e3d60702af2b2464d10fc8deed061..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png and /dev/null differ 
diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png deleted file mode 100644 index 0778fda74b2ad22ce4b631791a7b028cdef780a5..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png deleted file mode 100644 index 3d2914c726b5f4c46e66dfa85d4e88649fede6b3..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png deleted file mode 100644 index 0b944ef91e8b5ba4b14d2a35bd8879f261cf8f61..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6129b9e8645010fcb8372d9dc3dbb568dfa80907 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore @@ -0,0 +1,9 @@ +data/wmt14 +data/pre-wmt14 +pretrained/wmt14_model +gen.log +gen_result +train.log +dataprovider_copy_1.py +*.pyc +multi-bleu.perl diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/README.cn.md index f37c559921483a3d7c619ed74903df56b0584bd5..fa2b930be0d26d816566599cece8afbedc1157e0 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/README.cn.md @@ -11,10 +11,10 @@ 为解决以上问题,统计机器翻译(Statistical Machine Translation, SMT)技术应运而生。在统计机器翻译技术中,转化规则是由机器自动从大规模的语料中学习得到的,而非我们人主动提供规则。因此,它克服了基于规则的翻译系统所面临的知识获取瓶颈的问题,但仍然存在许多挑战:1)人为设计许多特征(feature),但永远无法覆盖所有的语言现象;2)难以利用全局的特征;3)依赖于许多预处理环节,如词语对齐、分词或符号化(tokenization)、规则抽取、句法分析等,而每个环节的错误会逐步累积,对翻译的影响也越来越大。 近年来,深度学习技术的发展为解决上述挑战提供了新的思路。将深度学习应用于机器翻译任务的方法大致分为两类:1)仍以统计机器翻译系统为框架,只是利用神经网络来改进其中的关键模块,如语言模型、调序模型等(见图1的左半部分);2)不再以统计机器翻译系统为框架,而是直接用神经网络将源语言映射到目标语言,即端到端的神经网络机器翻译(End-to-End Neural Machine Translation, End-to-End NMT)(见图1的右半部分),简称为NMT模型。 -![nmt](./image/nmt.png) -

+

+
图1. 基于神经网络的机器翻译系统 -

+
本教程主要介绍NMT模型,以及如何用PaddlePaddle来训练一个NMT模型。 @@ -45,18 +45,20 @@ 具体来说,该双向循环神经网络分别在时间维以顺序和逆序——即前向(forward)和后向(backward)——依次处理输入序列,并将每个时间步RNN的输出拼接成为最终的输出层。这样每个时间步的输出节点,都包含了输入序列中当前时刻完整的过去和未来的上下文信息。下图展示的是一个按时间步展开的双向循环神经网络。该网络包含一个前向和一个后向RNN,其中有六个权重矩阵:输入到前向隐层和后向隐层的权重矩阵(`$W_1, W_3$`),隐层到隐层自己的权重矩阵(`$W_2,W_5$`),前向隐层和后向隐层到输出层的权重矩阵(`$W_4, W_6$`)。注意,该网络的前向隐层和后向隐层之间没有连接。 -![bi_rnn](./image/bi_rnn.png) -

-图3. 按时间步展开的双向循环神经网络 -

+ +
+
+图2. 按时间步展开的双向循环神经网络 +
### 编码器-解码器框架 编码器-解码器(Encoder-Decoder)\[[2](#参考文献)\]框架用于解决由一个任意长度的源序列到另一个任意长度的目标序列的变换问题。即编码阶段将整个源序列编码成一个向量,解码阶段通过最大化预测序列概率,从中解码出整个目标序列。编码和解码的过程通常都使用RNN实现。 ![encoder_decoder](./image/encoder_decoder.png) -

-图4. 编码器-解码器框架 -

+
+
+图3. 编码器-解码器框架 +
#### 编码器 @@ -69,16 +71,14 @@ 3. 用RNN编码源语言词序列:这一过程的计算公式为`$h_i=\varnothing _\theta \left ( h_{i-1}, s_i \right )$`,其中`$h_0$`是一个全零的向量,`$\varnothing _\theta$`是一个非线性激活函数,最后得到的`$\mathbf{h}=\left \{ h_1,..., h_T \right \}$`就是RNN依次读入源语言`$T$`个词的状态编码序列。整句话的向量表示可以采用`$\mathbf{h}$`在最后一个时间步`$T$`的状态编码,或使用时间维上的池化(pooling)结果。 第3步也可以使用双向循环神经网络实现更复杂的句编码表示,具体可以用双向GRU实现。前向GRU按照词序列`$(x_1,x_2,...,x_T)$`的顺序依次编码源语言端词,并得到一系列隐层状态`$(\overrightarrow{h_1},\overrightarrow{h_2},...,\overrightarrow{h_T})$`。类似的,后向GRU按照`$(x_T,x_{T-1},...,x_1)$`的顺序依次编码源语言端词,得到`$(\overleftarrow{h_1},\overleftarrow{h_2},...,\overleftarrow{h_T})$`。最后对于词`$x_i$`,通过拼接两个GRU的结果得到它的隐层状态,即`$h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T}$`。 - -![encoder_attention](./image/encoder_attention.png) -

-图5. 使用双向GRU的编码器 -

+
+
+图4. 使用双向GRU的编码器 +
#### 解码器 机器翻译任务的训练过程中,解码阶段的目标是最大化下一个正确的目标语言词的概率。思路是: - 1. 每一个时刻,根据源语言句子的编码信息(又叫上下文向量,context vector)`$c$`、真实目标语言序列的第`$i$`个词`$u_i$`和`$i$`时刻RNN的隐层状态`$z_i$`,计算出下一个隐层状态`$z_{i+1}$`。计算公式如下: $$z_{i+1}=\phi_{\theta '} \left ( c,u_i,z_i \right )$$ 其中`$\phi _{\theta '}$`是一个非线性激活函数;`$c=q\mathbf{h}$`是源语言句子的上下文向量,在不使用[注意力机制](#注意力机制)时,如果[编码器](#编码器)的输出是源语言句子编码后的最后一个元素,则可以定义`$c=h_T$`;`$u_i$`是目标语言序列的第`$i$`个单词,`$u_0$`是目标语言序列的开始标记``,表示解码开始;`$z_i$`是`$i$`时刻解码RNN的隐层状态,`$z_0$`是一个全零的向量。 @@ -100,7 +100,6 @@ $$p\left ( u_{i+1}|u_{<i+1},\mathbf{x} \right )=softmax(W_sz_{i+1}+b_z)$$ 柱搜索算法使用广度优先策略建立搜索树,在树的每一层,按照启发代价(heuristic cost)(本教程中,为生成词的log概率之和)对节点进行排序,然后仅留下预先确定的个数(文献中通常称为beam width、beam size、柱宽度等)的节点。只有这些节点会在下一层继续扩展,其他节点就被剪掉了,也就是说保留了质量较高的节点,剪枝了质量较差的节点。因此,搜索所占用的空间和时间大幅减少,但缺点是无法保证一定获得最优解。 使用柱搜索算法的解码阶段,目标是最大化生成序列的概率。思路是: - 1. 每一个时刻,根据源语言句子的编码信息`$c$`、生成的第`$i$`个目标语言序列单词`$u_i$`和`$i$`时刻RNN的隐层状态`$z_i$`,计算出下一个隐层状态`$z_{i+1}$`。 2. 将`$z_{i+1}$`通过`softmax`归一化,得到目标语言序列的第`$i+1$`个单词的概率分布`$p_{i+1}$`。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png deleted file mode 100644 index 9d8efd50a49d0305586f550344472ab94c93bed3..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png deleted file mode 100644 index 4b35c88fc8ea2c503473c0c15711744e784d6af6..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png deleted file mode 100644 index 1b355e7786d25487a3f564af758c2c52c43b4690..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png deleted file mode 100644 index 3728f782ee09d9308d02b42305027b2735467ead..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png deleted file mode 100644 index 28d7a15a3bd65262bde22a3f41b5aa78b46b368a..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png deleted file mode 100644 index ea8585565da1ecaf241654c278c6f9b15e283286..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png and /dev/null differ diff 
--git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png deleted file mode 100644 index 60aee0017de73f462e35708b1055aff8992c03e1..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png deleted file mode 100644 index 6b73798fe632e0873b35c117b86f347c8cf3116a..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png deleted file mode 100644 index 0cde685b84106650a4df18ce335a23e6338d3d11..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png deleted file mode 100644 index a6af429f23f0f7e82650139bbd8dcbef27a34abe..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png deleted file mode 100644 index bf56d73ebf297fadf522389c7b6836dd379aa097..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png deleted file mode 100644 index 557310e044b2b6687e5ea6895417ed946ac7bc11..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f23901aeb3a9e7cd12611fc556742670d04a9bb5 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore @@ -0,0 +1,2 @@ +.idea +.ipynb_checkpoints diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/README.cn.md index 0f7c97021f8ad463fc51ed169604b789ea068c3d..4b79e62f74e587fcd939d9f9e911af80992ea6a3 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/README.cn.md @@ -37,7 +37,7 @@ Prediction Score is 4.25 YouTube是世界上最大的视频上传、分享和发现网站,YouTube推荐系统为超过10亿用户从不断增长的视频库中推荐个性化的内容。整个系统由两个神经网络组成:候选生成网络和排序网络。候选生成网络从百万量级的视频库中生成上百个候选,排序网络对候选进行打分排序,输出排名最高的数十个结果。系统结构如图1所示:

-
+
图1. YouTube 推荐系统结构

@@ -48,7 +48,7 @@ YouTube是世界上最大的视频上传、分享和发现网站,YouTube推荐 首先,将观看历史及搜索词记录这类历史信息,映射为向量后取平均值得到定长表示;同时,输入人口学特征以优化新用户的推荐效果,并将二值特征和连续特征归一化处理到[0, 1]范围。接下来,将所有特征表示拼接为一个向量,并输入给非线形多层感知器(MLP,详见[识别数字](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md)教程)处理。最后,训练时将MLP的输出给softmax做分类,预测时计算用户的综合特征(MLP的输出)与所有视频的相似度,取得分最高的$k$个作为候选生成网络的筛选结果。图2显示了候选生成网络结构。

-
+
图2. 候选生成网络结构

@@ -73,7 +73,7 @@ $$P(\omega=i|u)=\frac{e^{v_{i}u}}{\sum_{j \in V}e^{v_{j}u}}$$ 卷积神经网络主要由卷积(convolution)和池化(pooling)操作构成,其应用及组合方式灵活多变,种类繁多。本小结我们以如图3所示的网络进行讲解:

-
+
图3. 卷积神经网络文本分类模型

@@ -107,7 +107,7 @@ $$\hat c=max(c)$$

-
+
图4. 融合推荐模型

diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png deleted file mode 100644 index c213608e769f69fb2cfe8597f8e696ee53730e3d..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png deleted file mode 100644 index 8aedb2204371e7691140ceffa5992f6080bbf097..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png deleted file mode 100644 index 4298567ac5600173343299999965b20612e7affe..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png deleted file mode 100644 index a98e7cc67606b31e4c945f7eb907563e46dcef56..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png deleted file mode 100644 index 7fd97b9cc3a0b9105b41591af4e8f8e4646bd681..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png deleted file mode 100644 index 90c9b09fb78db98391ee199934f2d16efd6d6652..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png deleted file mode 100644 index 6fc8e11967000ec48c1c0a6fa3c2eaecb80cbb84..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png deleted file mode 100644 index 61e63d9147cbc2901706ef80776d706e5368c3c5..0000000000000000000000000000000000000000 Binary files 
a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png deleted file mode 100644 index fbcae2be81141be955076e877b94b0ea5d7e4d4a..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..667762d327cb160376a4119fa9df9db41b6443b2 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore @@ -0,0 +1,10 @@ +data/aclImdb +data/imdb +data/pre-imdb +data/mosesdecoder-master +*.log +model_output +dataprovider_copy_1.py +model.list +*.pyc +.DS_Store diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/README.cn.md index 5844b6fe137c2401a04e47b5b489434ee9b363f1..9900dfb9a67dc6f8940bd7dd3abfa15ac8a3488f 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/README.cn.md @@ -37,7 +37,7 @@ 循环神经网络是一种能对序列数据进行精确建模的有力工具。实际上,循环神经网络的理论计算能力是图灵完备的\[[4](#参考文献)\]。自然语言是一种典型的序列数据(词序列),近年来,循环神经网络及其变体(如long short term memory\[[5](#参考文献)\]等)在自然语言处理的多个领域,如语言模型、句法解析、语义角色标注(或一般的序列标注)、语义表示、图文生成、对话、机器翻译等任务上均表现优异甚至成为目前效果最好的方法。

-
+
图1. 循环神经网络按时间展开的示意图

@@ -66,7 +66,7 @@ $$ h_t = o_t\odot tanh(c_t) $$ 其中,$i_t, f_t, c_t, o_t$分别表示输入门,遗忘门,记忆单元及输出门的向量值,带角标的$W$及$b$为模型参数,$tanh$为双曲正切函数,$\odot$表示逐元素(elementwise)的乘法操作。输入门控制着新输入进入记忆单元$c$的强度,遗忘门控制着记忆单元维持上一时刻值的强度,输出门控制着输出记忆单元的强度。三种门的计算方式类似,但有着完全不同的参数,它们各自以不同的方式控制着记忆单元$c$,如图2所示:

-
+
图2. 时刻$t$的LSTM [7]

@@ -83,7 +83,7 @@ $$ h_t=Recrurent(x_t,h_{t-1})$$ 如图3所示(以三层为例),奇数层LSTM正向,偶数层LSTM反向,高一层的LSTM使用低一层LSTM及之前所有层的信息作为输入,对最高层LSTM序列使用时间维度上的最大池化即可得到文本的定长向量表示(这一表示充分融合了文本的上下文信息,并且对文本进行了深层次抽象),最后我们将文本表示连接至softmax构建分类模型。

-
+
图3. 栈式双向LSTM用于文本分类

diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png deleted file mode 100644 index 98fbea413a98a619004ca669c67f5f867fe974c9..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png deleted file mode 100644 index d73a00bf2c1fca2f9b8c26bccf5ea844fa1db50b..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png deleted file mode 100644 index 26c904102a6e6c4e30f0048b81373ae8c148b355..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg deleted file mode 100644 index 6b2adf70f2b5112a2e82505da5cff9f5fd0c6298..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png deleted file mode 100644 index 8b5dbd726178b5555c513294e7b10a81acc96ff5..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a620e0279c310d213d4e6d8e99e666962c11e352 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore @@ -0,0 +1,3 @@ +data/train.list +data/test.list +data/simple-examples* diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/README.cn.md b/doc/fluid/new_docs/beginners_guide/basics/word2vec/README.cn.md index d21c7ddcc501f863b5ce672123dbbc6c26528f15..2c68cdac4f10319359b74bc92569dfd3f65380b5 100644 --- a/doc/fluid/new_docs/beginners_guide/basics/word2vec/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/basics/word2vec/README.cn.md @@ -34,7 +34,7 @@ $$X = USV^T$$ 本章中,当词向量训练好后,我们可以用数据可视化算法t-SNE\[[4](#参考文献)\]画出词语特征在二维上的投影(如下图所示)。从图中可以看出,语义相关的词语(如a, the, these; big, huge)在投影上距离很近,语意无关的词(如say, business; decision, japan)在投影上的距离很远。

-
+
图1. 词向量的二维投影

@@ -90,7 +90,7 @@ $$\frac{1}{T}\sum_t f(w_t, w_{t-1}, ..., w_{t-n+1};\theta) + R(\theta)$$ 其中$f(w_t, w_{t-1}, ..., w_{t-n+1})$表示根据历史n-1个词得到当前词$w_t$的条件概率,$R(\theta)$表示参数正则项。

-
+
图2. N-gram神经网络模型

@@ -122,7 +122,7 @@ $$\frac{1}{T}\sum_t f(w_t, w_{t-1}, ..., w_{t-n+1};\theta) + R(\theta)$$ CBOW模型通过一个词的上下文(各N个词)预测当前词。当N=2时,模型如下图所示:

-
+
图3. CBOW模型

@@ -137,7 +137,7 @@ $$context = \frac{x_{t-1} + x_{t-2} + x_{t+1} + x_{t+2}}{4}$$ CBOW的好处是对上下文词语的分布在词向量上进行了平滑,去掉了噪声,因此在小数据集上很有效。而Skip-gram的方法中,用一个词预测其上下文,得到了当前词上下文的很多样本,因此可用于更大的数据集。

-
+
图4. Skip-gram模型

@@ -194,7 +194,7 @@ dream that one day 本配置的模型结构如下图所示:

-
+
图5. 模型配置中的N-gram神经网络模型

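The CBOW model described above averages the surrounding word vectors, $$context = \frac{x_{t-1} + x_{t-2} + x_{t+1} + x_{t+2}}{4}$$, and feeds the result to a softmax over the vocabulary. Below is a small NumPy sketch of that forward computation; the vocabulary size, embedding dimension, and weights are made-up placeholders, not values from the tutorial.

```python
import numpy as np

V, D = 1000, 32                    # toy vocabulary and embedding sizes
rng = np.random.RandomState(0)
embeddings = rng.randn(V, D).astype("float32")   # word vectors
W = rng.randn(D, V).astype("float32")            # softmax weights
b = np.zeros(V, dtype="float32")                 # softmax bias

def cbow_predict(context_ids):
    """Average the context vectors and score every word in the vocabulary."""
    context = embeddings[context_ids].mean(axis=0)   # the CBOW average
    logits = context.dot(W) + b
    probs = np.exp(logits - logits.max())            # numerically stable softmax
    return probs / probs.sum()

p = cbow_predict([3, 17, 42, 8])   # ids for x_{t-2}, x_{t-1}, x_{t+1}, x_{t+2}
print(p.argmax(), p.max())
```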
diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png deleted file mode 100644 index 384f59919a2c8dedb198e97d51434616648932e1..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png deleted file mode 100644 index 76b7d4bc0f99372465bd9aa34721513d39ad0776..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png deleted file mode 100644 index d985c393e618e9b79df05e4ff0ae57ccc93744d0..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png deleted file mode 100644 index 2e16ab2f443732b8ef5404a8e7cd2457bc5eee23..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png deleted file mode 100644 index 2449dce6a86b43b1b997ff418ed0dba56848463f..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png deleted file mode 100644 index 1e0b40a8f7aefdf46d42761305511f281c08e595..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png deleted file mode 100644 index 158bd64b8f8729dea67834a8d591d21bce8b8564..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png deleted file mode 100644 index ce4a8bf4769183cbaff91793753d2350a3ce936c..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png deleted file mode 100644 index a3ab385845d3dc8b5c670bae91225bc8dd47a8bb..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png deleted file mode 100644 index 
3c36c6d1f66eb98ea78c0673965d02a4ee3aa288..0000000000000000000000000000000000000000
Binary files a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png and /dev/null differ
diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md
index 27d25b43961ce74d73e391b735369501fb80a231..9574dbea2f9a39bb196b61bb4fd12ba7c378f75a 100644
--- a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md
+++ b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md
@@ -15,7 +15,7 @@ $$y_i = \omega_1x_{i1} + \omega_2x_{i2} + \ldots + \omega_dx_{id} + b, i=1,\ldo
 
 ## 效果展示
 我们使用从[UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing)获得的波士顿房价数据集进行模型的训练和预测。下面的散点图展示了使用模型对部分房屋价格进行的预测。其中,每个点的横坐标表示同一类房屋真实价格的中位数,纵坐标表示线性回归模型根据特征预测的结果,当二者值完全相等的时候就会落在虚线上。所以模型预测得越准确,则点离虚线越近。

-
+
图1. 预测值 V.S. 真实值

@@ -40,13 +40,9 @@ $$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$ ### 训练过程 定义好模型结构之后,我们要通过以下几个步骤进行模型训练 - 1. 初始化参数,其中包括权重$\omega_i$和偏置$b$,对其进行初始化(如0均值,1方差)。 - 2. 网络正向传播计算网络输出和损失函数。 - 3. 根据损失函数进行反向误差传播 ([backpropagation](https://en.wikipedia.org/wiki/Backpropagation)),将网络误差从输出层依次向前传递, 并更新网络中的参数。 - 4. 重复2~3步骤,直至网络训练误差达到规定的程度或训练轮次达到设定值。 ## 数据集 @@ -84,7 +80,7 @@ $$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$ - 很多的机器学习技巧/模型(例如L1,L2正则项,向量空间模型-Vector Space Model)都基于这样的假设:所有的属性取值都差不多是以0为均值且取值范围相近的。

-
+
图2. 各维属性的取值范围

@@ -199,10 +195,12 @@ step = 0 def event_handler_plot(event): global step if isinstance(event, fluid.EndStepEvent): - if event.step % 10 == 0: # record the test cost every 10 seconds + if step % 10 == 0: # record a train cost every 10 batches + plot_cost.append(train_title, step, event.metrics[0]) + + if step % 100 == 0: # record a test cost every 100 batches test_metrics = trainer.test( reader=test_reader, feed_order=feed_order) - plot_cost.append(test_title, step, test_metrics[0]) plot_cost.plot() @@ -210,12 +208,13 @@ def event_handler_plot(event): # If the accuracy is good enough, we can stop the training. print('loss is less than 10.0, stop') trainer.stop() - - # We can save the trained parameters for the inferences later - if params_dirname is not None: - trainer.save_params(params_dirname) - step += 1 + + if isinstance(event, fluid.EndEpochEvent): + if event.epoch % 10 == 0: + # We can save the trained parameters for the inferences later + if params_dirname is not None: + trainer.save_params(params_dirname) ``` ### 开始训练 @@ -231,11 +230,10 @@ trainer.train( event_handler=event_handler_plot, feed_order=feed_order) ``` - -

-
- 图3. 训练结果 -

+
+
+图3 训练结果 +
## 预测 @@ -262,18 +260,18 @@ inferencer = fluid.Inferencer( batch_size = 10 test_reader = paddle.batch(paddle.dataset.uci_housing.test(),batch_size=batch_size) test_data = test_reader().next() -test_feat = numpy.array([data[0] for data in test_data]).astype("float32") -test_label = numpy.array([data[1] for data in test_data]).astype("float32") +test_x = numpy.array([data[0] for data in test_data]).astype("float32") +test_y = numpy.array([data[1] for data in test_data]).astype("float32") -results = inferencer.infer({'x': test_feat}) +results = inferencer.infer({'x': test_x}) print("infer results: (House Price)") -for k in range(0, batch_size-1): - print("%d. %f" % (k, results[0][k])) +for idx, val in enumerate(results[0]): + print("%d: %.2f" % (idx, val)) print("\nground truth:") -for k in range(0, batch_size-1): - print("%d. %f" % (k, test_label[k])) +for idx, val in enumerate(test_y): + print("%d: %.2f" % (idx, val)) ``` ## 总结 diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png deleted file mode 100644 index 27e4acb1313794f52ad9ad9e874cdadd197ff41f..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions_en.png deleted file mode 100644 index f111c7cd766b7e9981513cc8c65be87dbbf3a79e..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png deleted file mode 100644 index 5325df4800985983e17476f007658d1cdb170b1c..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges_en.png deleted file mode 100644 index 6d6a079bfdcc33617f6cf36612b271b48be6304f..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test1.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test1.png deleted file mode 100644 index bcd304a6a0baf30ecfbc43e08fc0aca179d05958..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test1.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md index 3289116991cb8ebaa4a6fb78e100ce16f633d69c..e6f89b23a95d1a07565f3e0a285e9c3f921930df 100644 --- a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md +++ b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md @@ -6,8 +6,8 @@ 当我们学习编程的时候,编写的第一个程序一般是实现打印"Hello World"。而机器学习(或深度学习)的入门教程,一般都是 [MNIST](http://yann.lecun.com/exdb/mnist/) 
数据库上的手写识别问题。原因是手写识别属于典型的图像分类问题,比较简单,同时MNIST数据集也很完备。MNIST数据集作为一个简单的计算机视觉数据集,包含一系列如图1所示的手写数字图片和对应的标签。图片是28x28的像素矩阵,标签则对应着0~9的10个数字。每张图片都经过了大小归一化和居中处理。

-
- 图1. MNIST图片示例 +
+图1. MNIST图片示例

 MNIST数据集是从 [NIST](https://www.nist.gov/srd/nist-special-database-19) 的Special Database 3(SD-3)和Special Database 1(SD-1)构建而来。由于SD-3是由美国人口调查局的员工进行标注,SD-1是由美国高中生进行标注,因此SD-3比SD-1更干净也更容易识别。Yann LeCun等人从SD-1和SD-3中各取一半作为MNIST的训练集(60000条数据)和测试集(10000条数据),其中训练集来自250位不同的标注员,此外还保证了训练集和测试集的标注员是不完全相同的。
@@ -40,12 +40,12 @@ $$ y_i = \text{softmax}(\sum_j W_{i,j}x_j + b_i) $$
 
 在分类问题中,我们一般采用交叉熵代价损失函数(cross entropy loss),公式如下:
 
-$$ L_{cross-entropy} (label, y) = -\sum_i label_ilog(y_i) $$
+$$ L_{cross-entropy}(label, y) = -\sum_i label_i \log(y_i) $$
 
 图2为softmax回归的网络图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。

-
+
图2. softmax回归网络结构图

@@ -54,16 +54,14 @@ $$ L_{cross-entropy} (label, y) = -\sum_i label_ilog(y_i) $$ Softmax回归模型采用了最简单的两层神经网络,即只有输入层和输出层,因此其拟合能力有限。为了达到更好的识别效果,我们考虑在输入层和输出层中间加上若干个隐藏层\[[10](#参考文献)\]。 1. 经过第一个隐藏层,可以得到 $ H_1 = \phi(W_1X + b_1) $,其中$\phi$代表激活函数,常见的有sigmoid、tanh或ReLU等函数。 - 2. 经过第二个隐藏层,可以得到 $ H_2 = \phi(W_2H_1 + b_2) $。 - 3. 最后,再经过输出层,得到的$Y=\text{softmax}(W_3H_2 + b_3)$,即为最后的分类结果向量。 图3为多层感知器的网络结构图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。

-
+
图3. 多层感知器网络结构图

@@ -72,7 +70,7 @@ Softmax回归模型采用了最简单的两层神经网络,即只有输入层 在多层感知器模型中,将图像展开成一维向量输入到网络中,忽略了图像的位置和结构信息,而卷积神经网络能够更好的利用图像的结构信息。[LeNet-5](http://yann.lecun.com/exdb/lenet/)是一个较简单的卷积神经网络。图4显示了其结构:输入的二维图像,先经过两次卷积层到池化层,再经过全连接层,最后使用softmax分类作为输出层。下面我们主要介绍卷积层和池化层。

-
+
图4. LeNet-5卷积神经网络结构

@@ -81,7 +79,7 @@ Softmax回归模型采用了最简单的两层神经网络,即只有输入层 卷积层是卷积神经网络的核心基石。在图像识别里我们提到的卷积是二维卷积,即离散二维滤波器(也称作卷积核)与二维图像做卷积操作,简单的讲是二维滤波器滑动到二维图像上所有位置,并在每个位置上与该像素点及其领域像素点做内积。卷积操作被广泛应用与图像处理领域,不同卷积核可以提取不同的特征,例如边沿、线性、角等特征。在深层卷积神经网络中,通过卷积操作可以提取出图像低级到复杂的特征。

-
+
图5. 卷积层图片

@@ -98,7 +96,7 @@ Softmax回归模型采用了最简单的两层神经网络,即只有输入层 #### 池化层

-
+
图6. 池化层图片

@@ -106,8 +104,7 @@ Softmax回归模型采用了最简单的两层神经网络,即只有输入层 更详细的关于卷积神经网络的具体知识可以参考[斯坦福大学公开课]( http://cs231n.github.io/convolutional-networks/ )和[图像分类](https://github.com/PaddlePaddle/book/blob/develop/image_classification/README.md)教程。 -### 常见激活函数介绍 - +### 常见激活函数介绍 - sigmoid激活函数: $ f(x) = sigmoid(x) = \frac{1}{1+e^{-x}} $ - tanh激活函数: $ f(x) = tanh(x) = \frac{e^x-e^{-x}}{e^x+e^{-x}} $ @@ -136,20 +133,18 @@ PaddlePaddle在API中提供了自动加载[MNIST](http://yann.lecun.com/exdb/mni 我们建议使用 Fluid API,因为它更容易学起来。 下面是快速的 Fluid API 概述。 - 1. `inference_program`:指定如何从数据输入中获得预测的函数。 这是指定网络流的地方。 -2. `train_program`:指定如何从 `inference_program` 和`标签值`中获取 `loss` 的函数。 +1. `train_program`:指定如何从 `inference_program` 和`标签值`中获取 `loss` 的函数。 这是指定损失计算的地方。 -3. `optimizer_func`: “指定优化器配置的函数。优化器负责减少损失并驱动培训。Paddle 支持多种不同的优化器。 +1. `optimizer_func`: “指定优化器配置的函数。优化器负责减少损失并驱动培训。Paddle 支持多种不同的优化器。 -4. `Trainer`:PaddlePaddle Trainer 管理由 `train_program` 和 `optimizer` 指定的训练过程。 +1. `Trainer`:PaddlePaddle Trainer 管理由 `train_program` 和 `optimizer` 指定的训练过程。 通过 `event_handler` 回调函数,用户可以监控培训的进展。 -5. `Inferencer`:Fluid inferencer 加载 `inference_program` 和由 Trainer 训练的参数。 - +1. `Inferencer`:Fluid inferencer 加载 `inference_program` 和由 Trainer 训练的参数。 然后,它可以推断数据和返回预测。 在这个演示中,我们将深入了解它们。 @@ -240,6 +235,7 @@ def train_program(): acc = fluid.layers.accuracy(input=predict, label=label) return [avg_cost, acc] + ``` #### Optimizer Function 配置 @@ -255,9 +251,9 @@ def optimizer_program(): 下一步,我们开始训练过程。`paddle.dataset.movielens.train()`和`paddle.dataset.movielens.test()`分别做训练和测试数据集。这两个函数各自返回一个reader——PaddlePaddle中的reader是一个Python函数,每次调用的时候返回一个Python yield generator。 -下面`shuffle`是一个reader decorator,它接受一个reader A,返回另一个reader B 。reader B 每次读入`buffer_size`条训练数据到一个buffer里,然后随机打乱其顺序,并且逐条输出。 +下面`shuffle`是一个reader decorator,它接受一个reader A,返回另一个reader B。reader B 每次读入`buffer_size`条训练数据到一个buffer里,然后随机打乱其顺序,并且逐条输出。 -`batch`是一个特殊的decorator,它的输入是一个reader,输出是一个batched reader 。在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minibatch。 +`batch`是一个特殊的decorator,它的输入是一个reader,输出是一个batched reader。在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minibatch。 ```python train_reader = paddle.batch( @@ -280,7 +276,6 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( train_func=train_program, place=place, optimizer_func=optimizer_program) - ``` #### Event Handler 配置 @@ -315,11 +310,10 @@ def event_handler(event): `event_handler_plot` 可以用来在训练过程中画图如下: - -

-
-图7. 训练结果 -

+
+
+图7 训练结果 +
```python diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png deleted file mode 100644 index 3f5cdaacdc6acce41c5c6c99649be46685cf9903..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_en.png deleted file mode 100644 index bc1a9a4ccf81972dc0d69cf4c808a52218e14d61..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png deleted file mode 100644 index 65bd17eacd41bbdbdb042bd1ba366eb53663b410..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log_en.png deleted file mode 100644 index 77524754df906ab096e120bd657449f4565c3418..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/conv_layer.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/conv_layer.png deleted file mode 100644 index c751892ba0be3ae803b5933c3f33487ecfb6fe7f..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/conv_layer.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png deleted file mode 100644 index 030cd60d3b4af9aecd4941204da4ad15f6e1189f..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png deleted file mode 100644 index 90b02fa2a735cfcc9efb2de90906325dedcb358c..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling_en.png deleted file mode 100644 index c626723512b6ee02abd55e5bab65e7629d130522..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png deleted file 
mode 100644 index 9f4d26cd8da32201d0a5e9c72d466301dd2b42a1..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_en.png deleted file mode 100644 index 1fedea6a75abbf132cbbcf8ab10ce045997d697a..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png deleted file mode 100644 index f5a478fdc24f29c17555a2f1451f3f5a079faed9..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log_en.png deleted file mode 100644 index 7d5508a1eccfcea1925f438043ee93b57769bebf..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png deleted file mode 100644 index 4edd7cabf8a2282f6392ac1421c7ca4afb288589..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png deleted file mode 100644 index 40b98298288b9c406fce1cbca9c913753020a94d..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression_en.png deleted file mode 100644 index 833d3c663c94dd2d57fd19686949ded37a91f541..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png deleted file mode 100644 index 47204941af7f22e68386a70a06ec4f122b83e262..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log_en.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log_en.png deleted file mode 100644 index 
6fa0a951d5262effb707e3e15af8cb900e5560b8..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log_en.png and /dev/null differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test2.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test2.png deleted file mode 100644 index 5cb87b450d0398bcfaec0e647c362052069797e7..0000000000000000000000000000000000000000 Binary files a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test2.png and /dev/null differ diff --git a/doc/fluid/new_docs/advanced_usage/deploy/build_and_install_lib_cn.rst b/doc/fluid/new_docs/user_guides/howto/inference/build_and_install_lib_cn.rst similarity index 100% rename from doc/fluid/new_docs/advanced_usage/deploy/build_and_install_lib_cn.rst rename to doc/fluid/new_docs/user_guides/howto/inference/build_and_install_lib_cn.rst diff --git a/doc/fluid/new_docs/user_guides/howto/inference/index.rst b/doc/fluid/new_docs/user_guides/howto/inference/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..45e1a2883773b92ed47ef8d51417bbdcd060b4ec --- /dev/null +++ b/doc/fluid/new_docs/user_guides/howto/inference/index.rst @@ -0,0 +1,11 @@ +############ +Deploying Models for Inference +############ + +PaddlePaddle Fluid provides a C++ API for deploying trained models to production + +.. toctree:: + :maxdepth: 2 + + build_and_install_lib_cn.rst + native_infer.rst diff --git a/doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst b/doc/fluid/new_docs/user_guides/howto/inference/native_infer.rst similarity index 94% rename from doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst rename to doc/fluid/new_docs/user_guides/howto/inference/native_infer.rst index aa9377c112856693cda72779bd399f2415d716f0..21a6fe5cf54d0c0c760ade4ba602024ffa29675f 100644 --- a/doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst +++ b/doc/fluid/new_docs/user_guides/howto/inference/native_infer.rst @@ -4,11 +4,13 @@ Paddle inference API To make inference deployment simpler and more convenient, Fluid provides a set of high-level APIs that hide the different underlying optimized implementations. -`Inference library code `__ +`Inference library code `__ includes - the header file ``paddle_inference_api.h``, which defines all the interfaces - the library file ``libpaddle_fluid.so`` or ``libpaddle_fluid.a`` +- the library file ``libpaddle_inference_api.so`` or + ``libpaddle_inference_api.a`` For building and dependencies, see :ref:`install_or_build_cpp_inference_lib` . @@ -95,7 +97,8 @@ engine CHECK(predictor->Run(slots, &outputs)); // fetch outputs ... -At build time, linking ``libpaddle_fluid.a/.so`` is sufficient. +At build time, link both ``libpaddle_fluid.a/.so`` and + ``libpaddle_inference_api.a/.so``. Full example code ------------ diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst b/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst index 56fa928029903f1e3bd3e8064c146797f01b2b85..cca3684b78518867eae95d82e1347b52427ddc81 100644 --- a/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst +++ b/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst @@ -38,7 +38,6 @@ PaddlePaddle Fluid supports two ways of feeding data: :maxdepth: 2 feeding_data - use_recordio_reader Python Reader ############# diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst b/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst deleted file mode 100644 index dfda33f1b03516fe2c704f55d095955282b19109..0000000000000000000000000000000000000000 --- a/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst +++ /dev/null @@ -1,167 +0,0 @@ -..
_user_guide_use_recordio_as_train_data: - -############################ -Using RecordIO Files as Training Data -############################ - -Compared with :ref:`user_guide_use_numpy_array_as_train_data`, -:ref:`user_guide_use_recordio_as_train_data` performs better; -but the training dataset must first be converted into the RecordIO file format, after which the RecordIO files are imported into the network configuration with the -:code:`fluid.layers.open_files()` layer. -Copying data from host memory to GPU memory can additionally be accelerated with :code:`fluid.layers.double_buffer()`, -and data augmentation can be applied with the :code:`fluid.layers.Preprocessor` utility. - -Converting training data into the RecordIO file format -################################ - -In :code:`fluid.recordio_writer`, each record is a -:code:`vector<LoDTensor>`, i.e. an array of Tensors that supports sequence information. This array holds all -the features needed for training. For image classification, for example, it can contain the image and the class label. - -A :ref:`user_guide_reader` can be converted into a single RecordIO file with -:code:`fluid.recordio_writer.convert_reader_to_recordio_file()`, or into -multiple RecordIO files with -:code:`fluid.recordio_writer.convert_reader_to_recordio_files()`. - -Usage: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - def reader_creator(): - def __impl__(): - for i in range(1000): - yield [ - numpy.random.random(size=[3,224,224]).astype("float32"), - numpy.random.random(size=[1]).astype("int64") - ] - return __impl__ - - img = fluid.layers.data(name="image", shape=[3, 224, 224]) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace()) - - BATCH_SIZE = 32 - reader = paddle.batch(reader_creator(), batch_size=BATCH_SIZE) - fluid.recordio_writer.convert_reader_to_recordio_file( - "train.recordio", feeder=feeder, reader_creator=reader) - -Here :code:`reader_creator` creates a :code:`Reader`, and -:ref:`_api_fluid_data_feeder_DataFeeder` -is the utility that converts a :code:`Reader` into :code:`LoDTensor`. See -:ref:`user_guide_reader` for details. - -The program above converts the data from :code:`reader_creator` into the file :code:`train.recordio`, -in which each record holds 32 samples. If the batch size will be adjusted during training, -set the number of samples per record to 1 and see -:ref:`user_guide_use_recordio_as_train_data_use_op_create_batch`. - - -Configuring the network and opening RecordIO files -############################## - -Once the RecordIO files are ready, open them with :code:`fluid.layers.open_files()` -and read their contents with :code:`fluid.layers.read_file`. -Basic usage: - -.. code-block:: python - - import paddle.fluid as fluid - - file_obj = fluid.layers.open_files( - filenames=["train.recordio"], - shape=[[3, 224, 224], [1]], - lod_levels=[0, 0], - dtypes=["float32", "int64"], - pass_num=100 - ) - - image, label = fluid.layers.read_file(file_obj) - -If :code:`pass_num` is set, the data is read again after it is exhausted, -until it has been read :code:`pass_num` times. - - - -Advanced usage -######## - - -Using :code:`fluid.layers.double_buffer()` ------------------------------------------- - -:code:`Double buffer` uses double buffering to copy training data from host memory to GPU memory. To enable it, -wrap the file object with :code:`fluid.layers.double_buffer()`. For example: - -.. code-block:: python - - import paddle.fluid as fluid - file_obj = fluid.layers.open_files(...) - file_obj = fluid.layers.double_buffer(file_obj) - - image, label = fluid.layers.read_file(file_obj) - -For background on double buffering, see -`Multiple buffering `_ . - -Configuring data augmentation ------------- - -Data augmentation for the files is configured with :code:`fluid.layers.Preprocessor`. For example: - -.. code-block:: python - - import paddle.fluid as fluid - file_obj = fluid.layers.open_files(...) - preprocessor = fluid.layers.Preprocessor(reader=data_file) - with preprocessor.block(): - image, label = preprocessor.inputs() - image = image / 2 - label = label + 1 - preprocessor.outputs(image, label) - -As shown above, :code:`Preprocessor` defines a data-augmentation module, and the -concrete augmentation operations are placed inside :code:`with preprocessor.block()`. The fields of the data file are obtained through -:code:`preprocessor.inputs()`, and the preprocessed outputs are marked with -:code:`preprocessor.outputs()`. - -..
_user_guide_use_recordio_as_train_data_use_op_create_batch: - -Batching with an Op ------------- - -Batches can be assembled dynamically during training with :code:`fluid.layers.batch()`. For example: - -.. code-block:: python - - import paddle.fluid as fluid - file_obj = fluid.layers.open_files(...) - file_obj = fluid.layers.batch(file_obj, batch_size=32) - - img, label = fluid.layers.read_file(file_obj) - -Note that if the last few samples of the dataset cannot fill a batch of size :code:`batch_size`, -they simply form a smaller batch for training. - -Shuffling the input data ----------------- - -The training data can be reshuffled dynamically during training with :code:`fluid.layers.shuffle()`. For example: - -.. code-block:: python - - import paddle.fluid as fluid - file_obj = fluid.layers.open_files(...) - file_obj = fluid.layers.shuffle(file_obj, buffer_size=8192) - - img, label = fluid.layers.read_file(file_obj) - -Note that: - -1. :code:`shuffle` works by -first reading in :code:`buffer_size` samples and then drawing samples from them at random for training. - -2. The :code:`buffer_size` of :code:`shuffle` consumes training memory; make sure enough memory is available during training -to cache :code:`buffer_size` samples. diff --git a/doc/fluid/new_docs/user_guides/index.rst b/doc/fluid/new_docs/user_guides/index.rst index 453cb71cfdf72e031ce0f0517e2db936eca38dfc..377631109d8f65c149b12cd2a0e4da920fdf4def 100644 --- a/doc/fluid/new_docs/user_guides/index.rst +++ b/doc/fluid/new_docs/user_guides/index.rst @@ -15,4 +15,5 @@ howto/training/index howto/debug/index howto/evaluation/index + howto/inference/index models/index.rst diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc index cd00b7de7338982308acfa1f1e8c38e010c6a43b..c9e3a8ac1d1e5228725bff49ecc6d91e640dfe57 100644 --- a/paddle/fluid/framework/data_layout_transform.cc +++ b/paddle/fluid/framework/data_layout_transform.cc @@ -46,7 +46,7 @@ struct CastDataLayout { const std::vector axis_; template - void operator()() { + void apply() { auto place = ctx_->GetPlace(); if (platform::is_cpu_place(place)) { diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index 84691a2059124960a3213802fec0863f8abe6df7..8ad2fb5f3ffd9641932bbbb024a31e81d31dc9bb 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -26,75 +26,40 @@ namespace framework { extern proto::VarType::Type ToDataType(std::type_index type); extern std::type_index ToTypeIndex(proto::VarType::Type type); -#if !defined(_WIN32) template inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { switch (type) { case proto::VarType::FP16: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::FP32: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::FP64: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::INT32: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::INT64: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::BOOL: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::UINT8: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::INT16: - visitor.template operator()(); + visitor.template apply(); break; case proto::VarType::INT8: - visitor.template operator()(); + visitor.template apply(); break; default: PADDLE_THROW("Not supported %d", type); } } -#else -// the msvc compiler do not implement two-stage name lookup correctly.
-template -inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { - switch (type) { - case proto::VarType::FP16: - visitor.operator()(); - break; - case proto::VarType::FP32: - visitor.operator()(); - break; - case proto::VarType::FP64: - visitor.operator()(); - break; - case proto::VarType::INT32: - visitor.operator()(); - break; - case proto::VarType::INT64: - visitor.operator()(); - break; - case proto::VarType::BOOL: - visitor.operator()(); - break; - case proto::VarType::UINT8: - visitor.operator()(); - break; - case proto::VarType::INT16: - visitor.operator()(); - break; - default: - PADDLE_THROW("Not supported %d", type); - } -} -#endif // _WIN32 extern std::string DataTypeToString(const proto::VarType::Type type); extern size_t SizeOfType(std::type_index type); diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc index 5a57ec20585c26dbcd4251464718fc819148a7a5..d79f8cacb5f4727defc77380371e57bcea65f068 100644 --- a/paddle/fluid/framework/data_type_transform.cc +++ b/paddle/fluid/framework/data_type_transform.cc @@ -37,7 +37,7 @@ struct CastDataType { const platform::DeviceContext* ctx_; template - void operator()() { + void apply() { auto* in_begin = in_.data(); auto* in_end = in_begin + in_.numel(); auto* out_begin = out_->mutable_data(in_.place()); diff --git a/paddle/fluid/framework/details/reduce_and_gather.h b/paddle/fluid/framework/details/reduce_and_gather.h index e28264eb32756f77ef5baed3dff77ba9f0943160..bd6153c0c736f6e32378eebcbf6c4d7e402c9b42 100644 --- a/paddle/fluid/framework/details/reduce_and_gather.h +++ b/paddle/fluid/framework/details/reduce_and_gather.h @@ -31,7 +31,7 @@ struct ReduceLoDTensor { : src_tensors_(src), dst_tensor_(*dst) {} template - void operator()() const { + void apply() const { PADDLE_ENFORCE(!src_tensors_.empty()); auto &t0 = *src_tensors_[0]; PADDLE_ENFORCE_NE(t0.numel(), 0); diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc index d2d051a69a33a38535e67227d4cc62f5b35e430c..0278ade6763ec614701674691797d766878a378e 100644 --- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc @@ -13,6 +13,9 @@ // limitations under the License. 
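Across data_layout_transform.cc, data_type.h, data_type_transform.cc, and reduce_and_gather.h the visitor entry point is renamed from `operator()` to `apply`, which is what lets the Windows-only duplicate of `VisitDataType` above be deleted: old MSVC does not implement two-stage name lookup correctly, and the `visitor.template operator()<T>()` spelling is exactly what it trips over, while a named member keeps one dispatch function portable. Below is a minimal self-contained sketch of the pattern (toy enum and visitor, not Paddle's actual types):

```cpp
#include <cstdint>
#include <iostream>

// Sketch of the visitor dispatch used by VisitDataType. With operator(),
// every call site needs the awkward `visitor.template operator()<T>()`
// spelling, which old MSVC mishandles; a named `apply` keeps the call as
// the friendlier `visitor.template apply<T>()`.
struct PrintSizeVisitor {
  template <typename T>
  void apply() const {
    std::cout << "element size: " << sizeof(T) << " bytes\n";
  }
};

enum class DType { FP32, INT64 };

template <typename Visitor>
void VisitDType(DType type, Visitor visitor) {
  switch (type) {
    case DType::FP32:
      visitor.template apply<float>();  // `.template` because the type is dependent
      break;
    case DType::INT64:
      visitor.template apply<int64_t>();
      break;
  }
}

int main() {
  VisitDType(DType::FP32, PrintSizeVisitor{});   // element size: 4 bytes
  VisitDType(DType::INT64, PrintSizeVisitor{});  // element size: 8 bytes
}
```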
#include "paddle/fluid/framework/ir/attention_lstm_fuse_pass.h" + +#include + #include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include "paddle/fluid/framework/ir/graph_viz_pass.h" #include "paddle/fluid/framework/lod_tensor.h" @@ -216,11 +219,11 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0, float* out_data = out->mutable_data(platform::CPUPlace()); std::array tensors( - {W_forget_w0.data(), W_input_w0.data(), - W_output_w0.data(), W_cell_w0.data()}); + {{W_forget_w0.data(), W_input_w0.data(), + W_output_w0.data(), W_cell_w0.data()}}); std::array tensors1( - {W_forget_w1.data(), W_input_w1.data(), - W_output_w1.data(), W_cell_w1.data()}); + {{W_forget_w1.data(), W_input_w1.data(), + W_output_w1.data(), W_cell_w1.data()}}); for (int row = 0; row < D; row++) { for (int col = 0; col < 4; col++) { @@ -243,8 +246,8 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input, const LoDTensor& B_output, const LoDTensor& B_cell, LoDTensor* out) { std::array tensors( - {B_forget.data(), B_input.data(), B_output.data(), - B_cell.data()}); + {{B_forget.data(), B_input.data(), B_output.data(), + B_cell.data()}}); PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1); int D = B_forget.dims()[0]; diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc index 5852705b6b8d1c650faeae3dc810aac65353b459..c29eb4d4a6224773326c210d47d38fa9bf289a00 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc @@ -13,37 +13,37 @@ // limitations under the License. #include "paddle/fluid/framework/ir/fc_lstm_fuse_pass.h" +#include "paddle/fluid/framework/lod_tensor.h" namespace paddle { namespace framework { namespace ir { -std::unique_ptr FCLstmFusePass::ApplyImpl( - std::unique_ptr graph) const { - GraphPatternDetector gpd; - auto* pattern = gpd.mutable_pattern(); - - std::unordered_set fused_ops({// first lstm - 13, 15, 16, - // second lstm - 23, 25, 26}); - - pattern->NewNode([&](Node* x) { return fused_ops.count(x->id()); }, - "any_node"); +std::string GenNodeName(const std::string& prefix, const std::string& name) { + return prefix + "/" + name; +} - std::unordered_set marked_nodes; +void BuildPattern(PDPattern* pattern, const std::string& name_scope, + bool with_fc_bias) { + PDNode* x = pattern->NewNode(name_scope, "x") + ->assert_is_op_input("mul") + ->assert_var_not_persistable(); + auto* fc_out = patterns::FC(pattern, name_scope, x, with_fc_bias); + fc_out->AsIntermediate(); // fc_out is a tmp var, will be removed after fuse. 
+ patterns::LSTM(pattern, name_scope, fc_out); + // LOG(INFO) << "\n" << pattern->DotString(); +} - auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, - Graph* g) { +int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope, + bool with_fc_bias) { + GraphPatternDetector gpd; + auto* pattern = gpd.mutable_pattern(); - auto* id = subgraph.at(gpd.pattern().RetrieveNode("any_node")); - marked_nodes.insert(id); - }; - gpd(graph.get(), handler); + BuildPattern(pattern, name_scope, with_fc_bias); // Create New OpDesc auto lstm_creator = [&](int lstm, int input, int weight_x, int weight_h, - int bias, int hidden, int cell, int xx) { + int bias, int hidden, int cell, int xx, int fc_bias) { #define GET_NODE(x) auto* x##_n = graph->RetriveNode(x); GET_NODE(input); GET_NODE(weight_x); @@ -61,12 +61,33 @@ std::unique_ptr FCLstmFusePass::ApplyImpl( SET_IN(WeightX, weight_x); SET_IN(WeightH, weight_h); SET_IN(Bias, bias); -#undef GET_NODE #undef SET_IN + if (with_fc_bias) { + // Add FC-bias with LSTM-bias and create a new weight + PADDLE_ENFORCE(scope); + const std::string& new_bias_var = name_scope + "_bias.new"; + auto* bias_var = scope->Var(new_bias_var); + PADDLE_ENFORCE(bias_var); + auto* bias_tensor = bias_var->GetMutable(); + auto* lstm_bias_var = scope->FindVar(bias_n->Name()); + PADDLE_ENFORCE(lstm_bias_var); + const auto& lstm_bias_tensor = lstm_bias_var->Get(); + bias_tensor->Resize(lstm_bias_tensor.dims()); + + GET_NODE(fc_bias); + auto* fc_bias_var = scope->FindVar(fc_bias_n->Name()); + const auto& fc_bias_tensor = fc_bias_var->Get(); + + auto* data = bias_tensor->mutable_data(platform::CPUPlace()); + + for (int i = 0; i < bias_tensor->numel(); i++) { + data[i] = + fc_bias_tensor.data()[i] + lstm_bias_tensor.data()[i]; + } + op_desc.SetInput("Bias", {new_bias_var}); + } - VLOG(4) << "hidden_n: " << hidden_n->Name(); - VLOG(4) << "cell: " << cell_n->Name(); - VLOG(4) << "xx: " << xx_n->Name(); +#undef GET_NODE op_desc.SetInput("H0", {}); op_desc.SetInput("C0", {}); @@ -76,7 +97,7 @@ std::unique_ptr FCLstmFusePass::ApplyImpl( op_desc.SetOutput("BatchedGate", {"blstm_0.tmp_2"}); op_desc.SetOutput("BatchCellPreAct", {"blstm_1.tmp_2"}); op_desc.SetAttr("is_reverse", lstm_n->Op()->GetAttr("is_reverse")); - op_desc.SetAttr("use_peepholes", false); + op_desc.SetAttr("use_peepholes", lstm_n->Op()->GetAttr("use_peepholes")); auto* op = graph->CreateOpNode(&op_desc); #define LINK_TO(a, b) \ @@ -89,33 +110,71 @@ std::unique_ptr FCLstmFusePass::ApplyImpl( LINK_TO(op, hidden_n); #undef LINK_TO return op; - }; - lstm_creator(16, 12, 14, 18, 17, 22, 21, 19); - lstm_creator(26, 12, 24, 28, 27, 32, 31, 29); + int fusion_count{0}; - // remove all the nodes + auto fc_no_bias_handler = [&]( + const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { - for (auto* node : marked_nodes) { - graph->RemoveNode(const_cast(node)); - } +#define GET_NODE(name__) \ + std::string name__##key = name_scope + "/" + #name__; \ + auto* name__##n = pattern->RetrieveNode(name__##key); \ + PADDLE_ENFORCE(name__##n); \ + PADDLE_ENFORCE(subgraph.count(name__##n)); \ + Node* name__##_n = subgraph.at(name__##n); \ + int name__ __attribute__((unused)) = name__##_n->id(); - for (auto* node : graph->Nodes()) { - for (auto it = node->inputs.begin(); it != node->inputs.end();) { - if (marked_nodes.count(*it)) { - it = const_cast(node)->inputs.erase(it); - } else - it++; - } - for (auto it = node->outputs.begin(); it != node->outputs.end();) { - if (marked_nodes.count(*it)) { - it = 
const_cast(node)->outputs.erase(it); - } else - it++; + GET_NODE(x); + GET_NODE(w); + GET_NODE(mul); + GET_NODE(fc_out); + GET_NODE(Weight); + GET_NODE(lstm); + GET_NODE(Bias); + GET_NODE(Hidden); + GET_NODE(Cell); + + if (with_fc_bias) { + GET_NODE(fc_bias); + lstm_creator(lstm, x, w, Weight, Bias, Hidden, Cell, fc_out, fc_bias); + } else { + lstm_creator(lstm, x, w, Weight, Bias, Hidden, Cell, fc_out, -1); } - } +#undef GET_NODE + + // Remove unneeded nodes. + std::unordered_set marked_nodes({mul_n, lstm_n}); + + GraphSafeRemoveNodes(graph, marked_nodes); + + ++fusion_count; + }; + + gpd(graph, fc_no_bias_handler); + + return fusion_count; +} + +std::unique_ptr MulLstmFusePass::ApplyImpl( + std::unique_ptr graph) const { + FusePassBase::Init(name_scope_, graph.get()); + + int fusion_count = BuildFusion(graph.get(), name_scope_, param_scope(), + false /*with_fc_bias*/); + + AddStatis(fusion_count); + return graph; +} + +std::unique_ptr FCLstmFusePass::ApplyImpl( + std::unique_ptr graph) const { + FusePassBase::Init(name_scope_, graph.get()); + + int fusion_count = BuildFusion(graph.get(), name_scope_, param_scope(), + true /*with_fc_bias*/); + AddStatis(fusion_count); return graph; } @@ -123,4 +182,5 @@ std::unique_ptr FCLstmFusePass::ApplyImpl( } // namespace framework } // namespace paddle +REGISTER_PASS(mul_lstm_fuse_pass, paddle::framework::ir::MulLstmFusePass); REGISTER_PASS(fc_lstm_fuse_pass, paddle::framework::ir::FCLstmFusePass); diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h index 74b08ae558b12c9328db58687cd01edbc37291a8..5a6687872eb3ab4a032227fda9ff0e7f5254670b 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h @@ -12,20 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" -#include "paddle/fluid/framework/ir/pass.h" namespace paddle { namespace framework { namespace ir { -class FCLstmFusePass : public Pass { +// The FCLstmFusePass and MulLstmFusePass both fuse to the same FusionLstm op.
+ +// FC with bias +class FCLstmFusePass : public FusePassBase { public: virtual ~FCLstmFusePass() {} protected: std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + + const std::string name_scope_{"fc_lstm_fuse"}; +}; + +// Just FC without bias +class MulLstmFusePass : public FusePassBase { + public: + virtual ~MulLstmFusePass() {} + + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + const std::string name_scope_{"fc_nobias_lstm_fuse"}; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 55e495a0ed75c3a09703438dcfe01ca8f9d36118..ae8496204d4aeb88c04154d571325d440274e821 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -167,7 +167,6 @@ class Graph { std::map> attr_dels_; std::map> nodes_; std::unordered_set node_set_; - int node_count_{0}; }; bool IsControlDepVar(const ir::Node &var); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 945ab110b148c320b6626cadaa47d483df68419e..f651ab635eadc9f248964e91dceebf3aa9c42926 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -19,6 +19,7 @@ #include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include "paddle/fluid/framework/ir/graph_traits.h" +#include "paddle/fluid/framework/ir/graph_viz_pass.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -71,7 +72,11 @@ void PDPattern::AddEdge(PDNode* a, PDNode* b) { void GraphPatternDetector::operator()(Graph* graph, GraphPatternDetector::handle_t handler) { - if (!MarkPDNodesInGraph(*graph)) return; + if (!MarkPDNodesInGraph(*graph)) { + LOG(INFO) << "Mark failed"; + return; + } + auto subgraphs = DetectPatterns(); UniquePatterns(&subgraphs); RemoveOverlappedMatch(&subgraphs); @@ -87,7 +92,7 @@ void GraphPatternDetector::operator()(Graph* graph, } bool GraphPatternDetector::MarkPDNodesInGraph(const ir::Graph& graph) { - VLOG(4) << "mark pdnodes in graph"; + VLOG(3) << "mark pdnodes in graph"; if (graph.Nodes().empty()) return false; for (auto& node : GraphTraits::DFS(graph)) { @@ -107,6 +112,7 @@ bool GraphPatternDetector::MarkPDNodesInGraph(const ir::Graph& graph) { } } VLOG(3) << pdnodes2nodes_.size() << " nodes marked"; + + return !pdnodes2nodes_.empty(); } @@ -357,7 +363,9 @@ PDNode* PDNode::assert_is_op_nth_input(const std::string& op_type, assert_is_op_input(op_type); asserts_.emplace_back([=](Node* x) { for (auto* op : x->outputs) { - if (IsNthInput(x, op, argument, nth)) return true; + if (op->IsOp() && op->Op()->Type() == op_type && + IsNthInput(x, op, argument, nth)) + return true; } return false; }); @@ -368,7 +376,9 @@ PDNode* PDNode::assert_is_op_nth_output(const std::string& op_type, assert_is_var(); asserts_.emplace_back([=](Node* x) { for (auto* op : x->inputs) { - if (IsNthOutput(x, op, argument, nth)) return true; + if (op->IsOp() && op->Op()->Type() == op_type && + IsNthOutput(x, op, argument, nth)) + return true; } return false; }); @@ -412,6 +422,12 @@ PDNode* PDNode::assert_is_op_output(const std::string& op_type) { }); return this; } +PDNode* PDNode::assert_is_op_output(const std::string& op_type, + const std::string& argument) { + assert_is_var(); + assert_is_op_nth_output(op_type, argument, 0); + return this; +} PDNode* PDNode::assert_is_op_input(const std::string& op_type) { assert_is_var(); asserts_.emplace_back([=](Node* x) { @@ -424,6 +440,12 @@
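One detail of `BuildFusion` above deserves a note: the fused op takes a single bias input, so when the matched FC carries a bias, the pass materializes a new variable (`name_scope + "_bias.new"`) holding the element-wise sum of the FC bias and the LSTM bias. A hedged sketch of just that folding step, with plain vectors standing in for the LoDTensor buffers (names illustrative):

```cpp
#include <cstdio>
#include <vector>

// Fold the FC bias into the LSTM bias, as BuildFusion does when
// with_fc_bias is true. Assumes both biases have the same length,
// matching the fused op's single Bias input.
std::vector<float> FoldBias(const std::vector<float>& fc_bias,
                            const std::vector<float>& lstm_bias) {
  std::vector<float> fused(lstm_bias.size());
  for (size_t i = 0; i < fused.size(); ++i) {
    fused[i] = fc_bias[i] + lstm_bias[i];  // same loop as the pass
  }
  return fused;
}

int main() {
  auto fused = FoldBias({0.5f, -1.0f}, {1.0f, 2.0f});
  std::printf("%.1f %.1f\n", fused[0], fused[1]);  // prints: 1.5 1.0
}
```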
PDNode* PDNode::assert_is_op_input(const std::string& op_type) { }); return this; } +PDNode* PDNode::assert_is_op_input(const std::string& op_type, + const std::string& argument) { + assert_is_var(); + assert_is_op_nth_input(op_type, argument, 0); + return this; +} PDNode* PDNode::assert_op_has_n_inputs(const std::string& op_type, size_t n) { assert_is_op(op_type); asserts_.emplace_back([=](Node* x) { return x->inputs.size() == n; }); @@ -439,6 +461,128 @@ PDNode* PDNode::assert_more(PDNode::teller_t&& teller) { return this; } +bool VarLinksToOp(Node* node, const std::string& op_type) { + for (auto* out : node->outputs) { + if (out->IsOp() && out->Op()->Type() == op_type) { + return true; + } + } + return false; +} +bool IsNthInput(Node* var, Node* op, const std::string& argument, size_t nth) { + PADDLE_ENFORCE(var->IsVar()); + PADDLE_ENFORCE(op->IsOp()); + if (op->Op()->Input(argument).size() <= nth) return false; + return var->Name() == op->Op()->Input(argument)[nth]; +} +bool IsNthOutput(Node* var, Node* op, const std::string& argument, size_t nth) { + PADDLE_ENFORCE(var->IsVar()); + PADDLE_ENFORCE(op->IsOp()); + if (op->Op()->Output(argument).size() <= nth) return false; + return var->Name() == op->Op()->Output(argument)[nth]; +} +void GraphSafeRemoveNodes(Graph* graph, + const std::unordered_set& nodes) { + for (auto* node : nodes) { + graph->RemoveNode(const_cast(node)); + } + + for (auto* node : graph->Nodes()) { + for (auto it = node->inputs.begin(); it != node->inputs.end();) { + if (nodes.count(*it)) { + it = const_cast(node)->inputs.erase(it); + } else + it++; + } + for (auto it = node->outputs.begin(); it != node->outputs.end();) { + if (nodes.count(*it)) { + it = const_cast(node)->outputs.erase(it); + } else + it++; + } + } +} +bool VarLinksFromOp(Node* node, const std::string& op_type) { + for (auto* out : node->inputs) { + if (out->IsOp() && out->Op()->Type() == op_type) { + return true; + } + } + return false; +} + +PDNode* patterns::FC(PDPattern* pattern, const std::string& name_scope, + PDNode* x, bool with_bias) { + // Create Operators + PDNode* elementwise_add_op{nullptr}; + auto* mul_op = pattern->NewNode(name_scope, "mul")->assert_is_op("mul"); + if (with_bias) { + elementwise_add_op = pattern->NewNode(name_scope, "elementwise_add") + ->assert_is_op("elementwise_add"); + } + // Create variables + // w + auto* mul_weight_var = pattern->NewNode(name_scope, "w") + ->AsInput() + ->assert_is_persistable_var() + ->assert_is_op_nth_input("mul", "Y", 0); + PDNode* mul_out_var{nullptr}; + if (with_bias) { + // intermediate variable, will be removed in the IR after fuse. 
+ mul_out_var = pattern->NewNode(name_scope, "mul_out") + ->AsIntermediate() + ->assert_is_only_output_of_op("mul") + ->assert_is_op_input("elementwise_add"); + } + PDNode *bias{nullptr}, *fc_out{nullptr}; + if (with_bias) { + // bias + bias = pattern->NewNode(name_scope, "fc_bias") + ->assert_is_op_input("elementwise_add") + ->AsInput(); + // output + fc_out = pattern->NewNode(name_scope, "fc_out") + ->AsOutput() + ->assert_is_op_output("elementwise_add"); + } else { + fc_out = pattern->NewNode(name_scope, "fc_out") + ->AsOutput() + ->assert_is_op_output("mul"); + } + + if (with_bias) { + mul_op->LinksFrom({mul_weight_var, x}).LinksTo({mul_out_var}); + elementwise_add_op->LinksFrom({mul_out_var, bias}).LinksTo({fc_out}); + } else { + mul_op->LinksFrom({mul_weight_var, x}).LinksTo({fc_out}); + } + + return fc_out; +} +PDNode* patterns::LSTM(PDPattern* pattern, const std::string& name_scope, + PDNode* x) { + x->assert_is_op_input("lstm", "Input"); + auto* lstm_op = pattern->NewNode(name_scope, "lstm")->assert_is_op("lstm"); +#define NEW_NODE(arg__, io__) \ + auto* arg__ = pattern->NewNode(name_scope, #arg__) \ + ->assert_is_op_##io__("lstm", #arg__); + + // Currently, the H0 and C0 are optional + // TODO(Superjomn) upgrade the fuse framework to support optional. + // NEW_NODE(H0, input); + // NEW_NODE(C0, input); + NEW_NODE(Weight, input); + NEW_NODE(Bias, input); + + NEW_NODE(Hidden, output); + NEW_NODE(Cell, output); + NEW_NODE(BatchGate, output); + NEW_NODE(BatchCellPreAct, output); + + lstm_op->LinksFrom({x, Weight, Bias}); + lstm_op->LinksTo({Hidden, Cell, BatchGate, BatchCellPreAct}); + return Hidden; +} } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index f8488c84962d1caa6e7817b3c0349d6da3a59182..024ce8ce55616cc5e0eaced4a27a6e1fb004af2c 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -95,7 +95,11 @@ struct PDNode { PDNode* assert_var_not_persistable(); PDNode* assert_is_persistable_var(); PDNode* assert_is_op_output(const std::string& op_type); + PDNode* assert_is_op_output(const std::string& op_type, + const std::string& argument); PDNode* assert_is_op_input(const std::string& op_type); + PDNode* assert_is_op_input(const std::string& op_type, + const std::string& argument); PDNode* assert_is_op_nth_input(const std::string& op_type, const std::string& argument, int nth); PDNode* assert_is_op_nth_output(const std::string& op_type, @@ -167,6 +171,9 @@ class PDPattern { PDNode* NewNode(PDNode::teller_t&& teller, const std::string& name = NewID()); PDNode* NewNode(const std::string& name = NewID()); + PDNode* NewNode(const std::string& prefix, const std::string& name) { + return NewNode(prefix + "/" + name); + } PDNode* RetrieveNode(const std::string& id) const; const std::vector>& nodes() const { return nodes_; } @@ -257,64 +264,36 @@ class GraphPatternDetector { // some helper methods. -// Op's input. -static bool VarLinksToOp(Node* node, const std::string& op_type) { - for (auto* out : node->outputs) { - if (out->IsOp() && out->Op()->Type() == op_type) { - return true; - } - } - return false; -} - -// Op's output. 
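The new `patterns::FC` helper above matches the unfused form of a fully connected layer: a `mul` op whose output feeds an `elementwise_add` when a bias is present, in other words the two-op decomposition of `fc_out = x * W + bias`. A tiny numeric sketch of the equivalence the fuse passes rely on (illustrative values only):

```cpp
#include <array>
#include <cstdio>

// Conceptually what the detector looks for:
//   mul_out = mul(x, w); fc_out = elementwise_add(mul_out, bias)
// The fuse pass replaces the pair with one FC op computing the same result.
int main() {
  const std::array<float, 2> x{1.f, 2.f};  // input row vector
  const std::array<std::array<float, 2>, 2> w{{{1.f, 0.f}, {0.f, 1.f}}};
  const std::array<float, 2> bias{0.5f, -0.5f};
  std::array<float, 2> mul_out{}, fc_out{};
  for (int j = 0; j < 2; ++j)
    for (int i = 0; i < 2; ++i) mul_out[j] += x[i] * w[i][j];     // mul
  for (int j = 0; j < 2; ++j) fc_out[j] = mul_out[j] + bias[j];   // add
  std::printf("%.1f %.1f\n", fc_out[0], fc_out[1]);  // prints: 1.5 1.5
}
```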
-static bool VarLinksFromOp(Node* node, const std::string& op_type) { - for (auto* out : node->inputs) { - if (out->IsOp() && out->Op()->Type() == op_type) { - return true; - } - } - return false; -} +// Tell if a var links to an op of the given type (i.e. is one of its inputs) +bool VarLinksToOp(Node* node, const std::string& op_type); + +// Tell if a var links from an op of the given type (i.e. is one of its outputs) +bool VarLinksFromOp(Node* node, const std::string& op_type); // Check whether a var node is a op node's nth input. -static bool IsNthInput(Node* var, Node* op, const std::string& argument, - size_t nth) { - PADDLE_ENFORCE(var->IsVar()); - PADDLE_ENFORCE(op->IsOp()); - if (op->inputs.size() <= nth) return false; - return var->Name() == op->Op()->Input(argument)[nth]; -} - -static bool IsNthOutput(Node* var, Node* op, const std::string& argument, - size_t nth) { - PADDLE_ENFORCE(var->IsVar()); - PADDLE_ENFORCE(op->IsOp()); - if (op->inputs.size() <= nth) return false; - return var->Name() == op->Op()->Output(argument)[nth]; -} - -static void GraphSafeRemoveNodes(Graph* graph, - const std::unordered_set& nodes) { - for (auto* node : nodes) { - graph->RemoveNode(const_cast(node)); - } +bool IsNthInput(Node* var, Node* op, const std::string& argument, size_t nth); - for (auto* node : graph->Nodes()) { - for (auto it = node->inputs.begin(); it != node->inputs.end();) { - if (nodes.count(*it)) { - it = const_cast(node)->inputs.erase(it); - } else - it++; - } - for (auto it = node->outputs.begin(); it != node->outputs.end();) { - if (nodes.count(*it)) { - it = const_cast(node)->outputs.erase(it); - } else - it++; - } - } -} +// Tell whether a var node is an op node's nth output. +bool IsNthOutput(Node* var, Node* op, const std::string& argument, size_t nth); + +// Safely remove some nodes from a graph; dangling edges are cleaned up automatically. +void GraphSafeRemoveNodes(Graph* graph, + const std::unordered_set& nodes); + +// Some pre-defined patterns that can be reused in multiple passes.
+namespace patterns { + +// FC with bias +// op: mul + elementwise_add +// named nodes: +// mul, elementwise_add +// w, mul_out, bias, fc_out +PDNode* FC(PDPattern* pattern, const std::string& name_scope, PDNode* x, + bool with_bias); + +PDNode* LSTM(PDPattern* pattern, const std::string& name_scope, PDNode* x); + +} // namespace patterns } // namespace ir } // namespace framework diff --git a/paddle/fluid/framework/ir/graph_viz_pass.h b/paddle/fluid/framework/ir/graph_viz_pass.h index 8d885cb9e4ee6e01de386b0f22423988dbe60ca6..e64916a5bb662e3b00cfe212f0bbbc537c7bc2cc 100644 --- a/paddle/fluid/framework/ir/graph_viz_pass.h +++ b/paddle/fluid/framework/ir/graph_viz_pass.h @@ -42,6 +42,13 @@ class GraphVizPass : public Pass { marked_nodes_t ConsumeMarkedNodes(Graph* graph) const; }; +static GraphVizPass::marked_nodes_t& GetMarkedNodes(Graph* graph) { + if (!graph->Has(kGraphvizMarkedNodeAttr)) { + graph->Set(kGraphvizMarkedNodeAttr, new GraphVizPass::marked_nodes_t); + } + return graph->Get(kGraphvizMarkedNodeAttr); +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/rw_lock.h b/paddle/fluid/framework/rw_lock.h index a068d3543d9d2abec203f86362a8be5ba135d04d..da163835e8652ae479121bd67f2eed77332b2740 100644 --- a/paddle/fluid/framework/rw_lock.h +++ b/paddle/fluid/framework/rw_lock.h @@ -56,5 +56,76 @@ struct RWLock { }; #endif +class RWLockGuard { + public: + enum Status { kUnLock, kWRLock, kRDLock }; + + RWLockGuard(RWLock* rw_lock, Status init_status) + : lock_(rw_lock), status_(Status::kUnLock) { + switch (init_status) { + case Status::kRDLock: { + RDLock(); + break; + } + case Status::kWRLock: { + WRLock(); + break; + } + case Status::kUnLock: { + break; + } + } + } + + void WRLock() { + switch (status_) { + case Status::kUnLock: { + lock_->WRLock(); + status_ = Status::kWRLock; + break; + } + case Status::kWRLock: { + break; + } + case Status::kRDLock: { + PADDLE_THROW( + "Please unlock read lock first before invoking write lock."); + break; + } + } + } + + void RDLock() { + switch (status_) { + case Status::kUnLock: { + lock_->RDLock(); + status_ = Status::kRDLock; + break; + } + case Status::kRDLock: { + break; + } + case Status::kWRLock: { + PADDLE_THROW( + "Please unlock write lock first before invoking read lock."); + break; + } + } + } + + void UnLock() { + if (status_ != Status::kUnLock) { + lock_->UNLock(); + status_ = Status::kUnLock; + } + } + + ~RWLockGuard() { UnLock(); } + + private: + RWLock* lock_; + Status status_; +}; + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index a4319ffabb04f39437b76d97845e021ef9de66d3..8c290bb095d554a973e66a3a19606a06759fd668 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -49,7 +49,7 @@ struct TensorCopyVisitor { size_(size) {} template - void operator()() const { + void apply() const { // TODO(Yancey1989): support other place platform::CPUPlace cpu; memory::Copy(cpu, dst_->mutable_data(cpu) + dst_offset_, cpu, diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index ab693004cfb038fd92afd9c60e0fcb4e16b9f8a9..05c4a17a01c6fabe48f3fe18544c13153feb0673 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -149,7 +149,7 @@ struct AnyDTypeVisitor { : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {} template - void operator()() const { + void apply() 
const { auto t = EigenVector::Flatten(tensor_); auto o = EigenScalar::From(*out_); // return any of predicate_(t) is true. @@ -302,7 +302,7 @@ struct DeserializedDataFunctor { : buf_(buf), tensor_(tensor), place_(place) {} template - void operator()() { + void apply() { *buf_ = tensor_->mutable_data(place_); } diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 46c92d01bf3d4b00f24e1fc9790157d7ddc200d2..83666dd9883653827af61159585aeadcf1d4357c 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -17,9 +17,9 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) # paddle_fluid_origin exclude inference api interface cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api) -if(NOT APPLE) +#if(APPLE) add_subdirectory(api) -endif() +#endif() # Create static library cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor) diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt index d43ecc722ea3c78541835fb3f5efc9a3529fbf11..cc0dd0d492d42e9552c9ce081e268330599104f0 100644 --- a/paddle/fluid/inference/analysis/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -25,9 +25,8 @@ function (inference_analysis_test TARGET) if(WITH_TESTING) set(options "") set(oneValueArgs "") - set(multiValueArgs SRCS EXTRA_DEPS) + set(multiValueArgs SRCS ARGS EXTRA_DEPS) cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set(mem_opt "") if(WITH_GPU) set(mem_opt "--fraction_of_gpu_memory_to_use=0.5") @@ -35,7 +34,7 @@ function (inference_analysis_test TARGET) cc_test(${TARGET} SRCS "${analysis_test_SRCS}" DEPS analysis graph fc_fuse_pass graph_viz_pass infer_clean_graph_pass graph_pattern_detector pass ${analysis_test_EXTRA_DEPS} - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt}) + ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt} ${analysis_test_ARGS}) set_tests_properties(${TARGET} PROPERTIES DEPENDS test_word2vec) endif(WITH_TESTING) endfunction(inference_analysis_test) @@ -51,7 +50,7 @@ endfunction(inference_download_and_uncompress) set(DITU_RNN_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fmodel.tar.gz") set(DITU_RNN_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fdata.txt.tar.gz") set(DITU_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/ditu_rnn" CACHE PATH "Ditu RNN model and data root." 
FORCE) -if (NOT EXISTS ${DITU_INSTALL_DIR}) +if (NOT EXISTS ${DITU_INSTALL_DIR} AND WITH_TESTING) inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_MODEL_URL} "ditu_rnn_fluid%2Fmodel.tar.gz") inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_DATA_URL} "ditu_rnn_fluid%2Fdata.txt.tar.gz") endif() @@ -70,8 +69,7 @@ inference_analysis_test(test_analyzer SRCS analyzer_tester.cc attention_lstm_fuse_pass paddle_inference_api pass - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model - --infer_ditu_rnn_model=${DITU_INSTALL_DIR}/model + ARGS --infer_ditu_rnn_model=${DITU_INSTALL_DIR}/model --infer_ditu_rnn_data=${DITU_INSTALL_DIR}/data.txt) inference_analysis_test(test_data_flow_graph SRCS data_flow_graph_tester.cc) @@ -88,13 +86,25 @@ inference_analysis_test(test_model_store_pass SRCS model_store_pass_tester.cc) set(CHINESE_NER_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner_model.tar.gz") set(CHINESE_NER_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner-data.txt.tar.gz") set(CHINESE_NER_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/chinese_ner" CACHE PATH "Chinese ner model and data root." FORCE) -if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR}) +if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING) inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_MODEL_URL} "chinese_ner_model.tar.gz") inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_DATA_URL} "chinese_ner-data.txt.tar.gz") endif() -inference_analysis_test(test_chinese_ner SRCS chinese_ner_tester.cc +inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc EXTRA_DEPS paddle_inference_api paddle_fluid_api - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model - --infer_model=${CHINESE_NER_INSTALL_DIR}/model + ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt) + +set(LAC_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/lac_model.tar.gz") +set(LAC_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/lac_data.txt.tar.gz") +set(LAC_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/lac" CACHE PATH "LAC model and data root." 
FORCE) +if (NOT EXISTS ${LAC_INSTALL_DIR} AND WITH_TESTING) + inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_MODEL_URL} "lac_model.tar.gz") + inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_DATA_URL} "lac_data.txt.tar.gz") +endif() + +inference_analysis_test(test_analyzer_lac SRCS analyzer_lac_tester.cc + EXTRA_DEPS paddle_inference_api paddle_fluid_api + ARGS --infer_model=${LAC_INSTALL_DIR}/model + --infer_data=${LAC_INSTALL_DIR}/data.txt) diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc index e6e63544ffa2de09e39b02769aaaf0793d6b1111..192ac2daa6a78efec6db19870f71e80593c62da9 100644 --- a/paddle/fluid/inference/analysis/analyzer.cc +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -109,6 +109,7 @@ void Analyzer::Run(Argument* argument) { "infer_clean_graph_pass", "graph_viz_pass", // "attention_lstm_fuse_pass", "graph_viz_pass", // "fc_lstm_fuse_pass", "graph_viz_pass", // + "mul_lstm_fuse_pass", "graph_viz_pass", // "seq_concat_fc_fuse_pass", "graph_viz_pass", // "fc_fuse_pass", "graph_viz_pass" // diff --git a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..e2f7253ac04cac8457fa60a055e4ef2770aa874b --- /dev/null +++ b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc @@ -0,0 +1,199 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "paddle/fluid/inference/analysis/analyzer.h" +#include +#include +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/inference/analysis/ut_helper.h" +#include "paddle/fluid/inference/api/helper.h" +#include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "paddle/fluid/platform/profiler.h" + +DEFINE_string(infer_model, "", "model path for LAC"); +DEFINE_string(infer_data, "", "data file for LAC"); +DEFINE_int32(batch_size, 1, "batch size."); +DEFINE_int32(burning, 0, "Burning before repeat."); +DEFINE_int32(repeat, 1, "Running the inference program repeat times."); +DEFINE_bool(test_all_data, false, "Test the all dataset in data file."); + +namespace paddle { +namespace inference { +namespace analysis { + +struct DataRecord { + std::vector data; + std::vector lod; + // for dataset and nextbatch + size_t batch_iter{0}; + std::vector> batched_lods; + std::vector> batched_datas; + std::vector> datasets; + DataRecord() = default; + explicit DataRecord(const std::string &path, int batch_size = 1) { + Load(path); + Prepare(batch_size); + batch_iter = 0; + } + void Load(const std::string &path) { + std::ifstream file(path); + std::string line; + int num_lines = 0; + datasets.resize(0); + while (std::getline(file, line)) { + num_lines++; + std::vector data; + split(line, ';', &data); + std::vector words_ids; + split_to_int64(data[1], ' ', &words_ids); + datasets.emplace_back(words_ids); + } + } + void Prepare(int bs) { + if (bs == 1) { + batched_datas = datasets; + for (auto one_sentence : datasets) { + batched_lods.push_back({0, one_sentence.size()}); + } + } else { + std::vector one_batch; + std::vector lod{0}; + int bs_id = 0; + for (auto one_sentence : datasets) { + bs_id++; + one_batch.insert(one_batch.end(), one_sentence.begin(), + one_sentence.end()); + lod.push_back(lod.back() + one_sentence.size()); + if (bs_id == bs) { + bs_id = 0; + batched_datas.push_back(one_batch); + batched_lods.push_back(lod); + one_batch.clear(); + one_batch.resize(0); + lod.clear(); + lod.resize(0); + lod.push_back(0); + } + } + if (one_batch.size() != 0) { + batched_datas.push_back(one_batch); + batched_lods.push_back(lod); + } + } + } + DataRecord NextBatch() { + DataRecord data; + data.data = batched_datas[batch_iter]; + data.lod = batched_lods[batch_iter]; + batch_iter++; + if (batch_iter >= batched_datas.size()) { + batch_iter = 0; + } + return data; + } +}; +void GetOneBatch(std::vector *input_slots, DataRecord *data, + int batch_size) { + auto one_batch = data->NextBatch(); + PaddleTensor input_tensor; + input_tensor.name = "word"; + input_tensor.shape.assign({static_cast(one_batch.data.size()), 1}); + input_tensor.lod.assign({one_batch.lod}); + input_tensor.dtype = PaddleDType::INT64; + TensorAssignData(&input_tensor, {one_batch.data}); + PADDLE_ENFORCE_EQ(batch_size, static_cast(one_batch.lod.size() - 1)); + input_slots->assign({input_tensor}); +} +static void PrintTime(const double latency, const int bs, const int repeat) { + LOG(INFO) << "===========profile result==========="; + LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat + << ", avg latency: " << latency / repeat << "ms"; + LOG(INFO) << "====================================="; +} +void BenchAllData(const std::string &model_path, const std::string &data_file, + const int batch_size, const int repeat) { + NativeConfig config; + config.model_dir = model_path; + config.use_gpu = false; + config.device = 0; + config.specify_input_name = true; + std::vector input_slots, outputs_slots; + DataRecord 
data(data_file, batch_size); + auto predictor = + CreatePaddlePredictor(config); + GetOneBatch(&input_slots, &data, batch_size); + for (int i = 0; i < FLAGS_burning; i++) { + predictor->Run(input_slots, &outputs_slots); + } + Timer timer; + double sum = 0; + for (int i = 0; i < repeat; i++) { + for (size_t bid = 0; bid < data.batched_datas.size(); ++bid) { + GetOneBatch(&input_slots, &data, batch_size); + timer.tic(); + predictor->Run(input_slots, &outputs_slots); + sum += timer.toc(); + } + } + PrintTime(sum, batch_size, repeat); +} +const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25, + 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43, + 44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39, + 14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23}; +void TestLACPrediction(const std::string &model_path, + const std::string &data_file, const int batch_size, + const int repeat, bool test_all_data) { + if (test_all_data) { + BenchAllData(model_path, data_file, batch_size, repeat); + return; + } + NativeConfig config; + config.model_dir = model_path; + config.use_gpu = false; + config.device = 0; + config.specify_input_name = true; + std::vector input_slots, outputs_slots; + DataRecord data(data_file, batch_size); + GetOneBatch(&input_slots, &data, batch_size); + auto predictor = + CreatePaddlePredictor(config); + for (int i = 0; i < FLAGS_burning; i++) { + predictor->Run(input_slots, &outputs_slots); + } + Timer timer; + timer.tic(); + for (int i = 0; i < repeat; i++) { + predictor->Run(input_slots, &outputs_slots); + } + PrintTime(timer.toc(), batch_size, repeat); + EXPECT_EQ(outputs_slots.size(), 1UL); + auto &out = outputs_slots[0]; + size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1, + [](int a, int b) { return a * b; }); + size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t); + PADDLE_ENFORCE_GT(size, 0); + EXPECT_GE(size, batch1_size); + int64_t *pdata = static_cast(out.data.data()); + for (size_t i = 0; i < batch1_size; ++i) { + EXPECT_EQ(pdata[i], lac_ref_data[i]); + } +} +TEST(Analyzer_LAC, native) { + LOG(INFO) << "LAC with native"; + TestLACPrediction(FLAGS_infer_model, FLAGS_infer_data, FLAGS_batch_size, + FLAGS_repeat, FLAGS_test_all_data); +} +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/chinese_ner_tester.cc b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc similarity index 100% rename from paddle/fluid/inference/analysis/chinese_ner_tester.cc rename to paddle/fluid/inference/analysis/analyzer_ner_tester.cc index 9088a29d504309bc2c7b96fd49a0bf44e7cf0da9..720a8811db75a91a5774a29dd95285eceabadf83 100644 --- a/paddle/fluid/inference/analysis/chinese_ner_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
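`DataRecord::Prepare` above assembles batches in the LoD (level-of-detail) layout used throughout Fluid: variable-length sentences are concatenated into one flat buffer while a cumulative offset vector records where each sentence starts and ends, which is also why `GetOneBatch` can assert that `batch_size == lod.size() - 1`. A small sketch of that bookkeeping (illustrative token values):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Sentences are flattened into one batch; `lod` holds cumulative offsets,
// so the i-th sentence occupies the half-open range [lod[i], lod[i+1]).
int main() {
  std::vector<std::vector<int64_t>> sentences = {{7, 8}, {9}, {10, 11, 12}};
  std::vector<size_t> lod{0};
  std::vector<int64_t> batch;
  for (const auto& s : sentences) {
    batch.insert(batch.end(), s.begin(), s.end());
    lod.push_back(lod.back() + s.size());
  }
  // lod is {0, 2, 3, 6}; batch size = lod.size() - 1 = 3 sentences
  for (size_t i = 0; i + 1 < lod.size(); ++i)
    std::printf("sentence %zu: tokens [%zu, %zu)\n", i, lod[i], lod[i + 1]);
}
```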
+#include "paddle/fluid/inference/analysis/analyzer.h" #include #include #include "paddle/fluid/framework/ir/pass.h" -#include "paddle/fluid/inference/analysis/analyzer.h" #include "paddle/fluid/inference/analysis/ut_helper.h" #include "paddle/fluid/inference/api/helper.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 1a65e85dd237eb1bacd3c15b4538a9835ec4b9e0..ec1f3979a74bd86ee7402bca441e95d3d177d113 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -34,7 +34,7 @@ namespace paddle { namespace inference { namespace analysis { -using namespace framework; +using namespace framework; // NOLINT TEST(Analyzer, analysis_without_tensorrt) { FLAGS_IA_enable_tensorrt_subgraph_engine = false; @@ -312,8 +312,8 @@ void TestDituRNNPrediction(const std::string &model_path, PADDLE_ENFORCE_GT(size, 0); float *data = static_cast(out.data.data()); float *base_data = static_cast(base_out.data.data()); - for (size_t i = 0; i < size; i++) { - EXPECT_NEAR(data[i], base_data[i], 1e-3); + for (size_t j = 0; j < size; j++) { + EXPECT_NEAR(data[j], base_data[j], 1e-3); } } @@ -329,6 +329,7 @@ void TestDituRNNPrediction(const std::string &model_path, ASSERT_TRUE(fuse_statis.count("fc")); EXPECT_EQ(fuse_statis.at("fc"), 1); + EXPECT_EQ(fuse_statis.at("fc_nobias_lstm_fuse"), 1); } } diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 3a4ffe967e67ab0487192bbf12d4d5a15f536aa3..e8fb0775b45761f64fd6fd28306c35b76d1e40c4 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -67,7 +67,7 @@ struct Argument { PADDLE_ENFORCE(!attrs_.count(key), "Duplicate set Argument's attr [%s]", key); attrs_[key] = data; - attr_deleters_[key] = [data, key, this]() { + attr_deleters_[key] = [data, key]() { VLOG(3) << "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; VLOG(3) << "argument delete attr: " << key; delete data; diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index 530274f0c9262b6ed0e43766606585c8459eabb9..38b11d9113e4b03f8365b969009f7a385a683a70 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include #include @@ -23,32 +22,14 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/inference/api/api_impl.h" +#include "paddle/fluid/inference/api/timer.h" #include "paddle/fluid/platform/profiler.h" DEFINE_bool(profile, false, "Turn on profiler for fluid"); namespace paddle { namespace { - -// Timer for timer -class Timer { - public: - double start; - double startu; - void tic() { - struct timeval tp; - gettimeofday(&tp, NULL); - start = tp.tv_sec; - startu = tp.tv_usec; - } - double toc() { - struct timeval tp; - gettimeofday(&tp, NULL); - double used_time_ms = - (tp.tv_sec - start) * 1000.0 + (tp.tv_usec - startu) / 1000.0; - return used_time_ms; - } -}; +using paddle::inference::Timer; template std::string num2str(T a) { @@ -62,14 +43,14 @@ void NativePaddlePredictor::PrepareFeedFetch() { for (auto *op : inference_program_->Block(0).AllOps()) { if (op->Type() == "feed") { int idx = boost::get(op->GetAttr("col")); - if (feeds_.size() <= (size_t)idx) { + if (feeds_.size() <= static_cast(idx)) { feeds_.resize(idx + 1); } feeds_[idx] = op; feed_names_[op->Output("Out")[0]] = idx; } else if (op->Type() == "fetch") { int idx = boost::get(op->GetAttr("col")); - if (fetchs_.size() <= (size_t)idx) { + if (fetchs_.size() <= static_cast(idx)) { fetchs_.resize(idx + 1); } fetchs_[idx] = op; @@ -80,7 +61,7 @@ void NativePaddlePredictor::PrepareFeedFetch() { bool NativePaddlePredictor::Init( std::shared_ptr parent_scope) { VLOG(3) << "Predictor::init()"; - +#if !defined(_WIN32) if (FLAGS_profile) { LOG(WARNING) << "Profiler is actived, might affect the performance"; LOG(INFO) << "You can turn off by set gflags '-profile false'"; @@ -89,6 +70,7 @@ bool NativePaddlePredictor::Init( : platform::ProfilerState::kCPU; platform::EnableProfiler(tracking_device); } +#endif if (config_.use_gpu) { place_ = paddle::platform::CUDAPlace(config_.device); @@ -133,10 +115,12 @@ bool NativePaddlePredictor::Init( } NativePaddlePredictor::~NativePaddlePredictor() { +#if !defined(_WIN32) if (FLAGS_profile) { platform::DisableProfiler(platform::EventSortingKey::kTotal, "./profile.log"); } +#endif if (sub_scope_) { scope_->DeleteScope(sub_scope_); } @@ -179,8 +163,13 @@ std::unique_ptr NativePaddlePredictor::Clone() { LOG(ERROR) << "fail to call Init"; return nullptr; } +#ifdef __clang__ + // fix clang compile error + return cls; +#else // fix manylinux compile error. 
   return std::move(cls);
+#endif
 }
 
 bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
@@ -329,7 +318,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
+#ifdef __clang__
+  // fix clang compile error
+  return predictor;
+#else
   return std::move(predictor);
+#endif
 }
 
 }  // namespace paddle
diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index a697218377e1e661dccc8d8a4c78f15b5c211243..afb46a7139f6ab8e6b3697fdc56fe1c78a05cd64 100644
--- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -3,6 +3,11 @@ cmake_minimum_required(VERSION 3.0)
 project(cpp_inference_demo CXX C)
 
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+if (WIN32)
+set(CMAKE_STATIC_LIBRARY_PREFIX "lib")
+else()
+set(CMAKE_STATIC_LIBRARY_PREFIX "")
+endif()
 
 if(NOT DEFINED PADDLE_LIB)
   message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
@@ -32,44 +37,56 @@ endif(NOT WIN32)
 
 include_directories("${PADDLE_LIB}/third_party/boost")
 include_directories("${PADDLE_LIB}/third_party/eigen3")
 
+if (NOT WIN32)
 link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
 link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
+link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
+endif(NOT WIN32)
+
 link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
-link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
+link_directories("${PADDLE_LIB}/paddle/fluid/inference")
 
 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
 
 if(WITH_MKL)
   include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
-  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so
-    ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5.so)
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
+    ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
   set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
   if(EXISTS ${MKLDNN_PATH})
     include_directories("${MKLDNN_PATH}/include")
     set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
   endif()
 else()
-  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
 endif()
 
 # Note: libpaddle_inference_api.so/a must be put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
-set(EXTERNAL_LIB "-lrt -ldl -lpthread")
 
+if (NOT WIN32)
+set(EXTERNAL_LIB "-lrt -ldl -lpthread")
 set(DEPS ${DEPS}
   ${MATH_LIB} ${MKLDNN_LIB}
   glog gflags protobuf snappystream snappy z
   ${EXTERNAL_LIB})
+else()
+set(DEPS ${DEPS}
+  ${MATH_LIB} ${MKLDNN_LIB}
+  ${CMAKE_STATIC_LIBRARY_PREFIX}glog ${CMAKE_STATIC_LIBRARY_PREFIX}gflags ${CMAKE_STATIC_LIBRARY_PREFIX}protobuf
+  ${EXTERNAL_LIB})
+endif(NOT WIN32)
+
 if(WITH_GPU)
-  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart.so)
+  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
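# Editor's sketch (not part of the patch): a typical out-of-source configure of
# this demo project; every path and value below is a placeholder, not a
# documented interface.
#
#   mkdir build && cd build
#   cmake .. -DPADDLE_LIB=/path/to/fluid_install_dir \
#            -DDEMO_NAME=simple_on_word2vec \
#            -DWITH_MKL=ON -DWITH_GPU=OFF -DWITH_STATIC_LIB=ON
#   make -j
#
# With the change above, library names resolve through
# CMAKE_STATIC_LIBRARY_SUFFIX / CMAKE_SHARED_LIBRARY_SUFFIX, so the same lists
# pick up .a/.so naming on Linux and .lib/.dll naming on Windows.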
 target_link_libraries(${DEMO_NAME} ${DEPS})
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index 8eac449a1081330877eac6d4f40c064533b0acab..bdc9a15d543818da94ac2acf34ecabbbbae3291e 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -20,30 +20,11 @@
 #include
 #include
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/timer.h"
 
 namespace paddle {
 namespace inference {
 
-// Timer for timer
-class Timer {
- public:
-  double start;
-  double startu;
-  void tic() {
-    struct timeval tp;
-    gettimeofday(&tp, NULL);
-    start = tp.tv_sec;
-    startu = tp.tv_usec;
-  }
-  double toc() {
-    struct timeval tp;
-    gettimeofday(&tp, NULL);
-    double used_time_ms =
-        (tp.tv_sec - start) * 1000.0 + (tp.tv_usec - startu) / 1000.0;
-    return used_time_ms;
-  }
-};
-
 static void split(const std::string &str, char sep,
                   std::vector<std::string> *pieces) {
   pieces->clear();
diff --git a/paddle/fluid/inference/api/timer.h b/paddle/fluid/inference/api/timer.h
new file mode 100644
index 0000000000000000000000000000000000000000..2df5274dc1f2e7ad8e434f1da9d5ae6aee94c784
--- /dev/null
+++ b/paddle/fluid/inference/api/timer.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <chrono>  // NOLINT
+
+namespace paddle {
+namespace inference {
+
+// A simple wall-clock timer for benchmarking inference calls.
+class Timer {
+ public:
+  std::chrono::high_resolution_clock::time_point start;
+  std::chrono::high_resolution_clock::time_point startu;
+
+  void tic() { start = std::chrono::high_resolution_clock::now(); }
+  double toc() {
+    startu = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> time_span =
+        std::chrono::duration_cast<std::chrono::duration<double>>(startu -
+                                                                  start);
+    double used_time_ms = static_cast<double>(time_span.count()) * 1000.0;
+    return used_time_ms;
+  }
+};
+
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 2f8fadcefe5d5b6f428092914c7e999ec7524862..7ec1e78da4ec642cb1e6248edfbcfed748fa11b8 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -178,6 +178,8 @@ function(op_library TARGET)
       file(APPEND ${pybind_file} "USE_OP(relu);\n")
     elseif(${TARGET} STREQUAL "fake_dequantize")
       file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n")
+    elseif(${TARGET} STREQUAL "fake_quantize")
+      file(APPEND ${pybind_file} "USE_OP(fake_quantize_abs_max);\n")
     elseif(${TARGET} STREQUAL "tensorrt_engine_op")
       message(STATUS "Pybind skips [tensorrt_engine_op], because this OP is only used in inference")
     elseif(${TARGET} STREQUAL "fc")
@@ -293,6 +295,7 @@ op_library(extract_rows_op DEPS memory)
 op_library(flatten_op DEPS reshape_op)
 op_library(sequence_pad_op DEPS sequence_padding)
 op_library(unstack_op DEPS stack_op)
+op_library(fake_quantize_op DEPS memory)
 
 if (WITH_GPU)
   op_library(conv_op DEPS vol2col depthwise_conv im2col)
diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc
index 10d678111f5325e495b24286e6ecf651230393fe..b6cb935814e25b31d4104f9ce24fe952680cb491 100644
--- a/paddle/fluid/operators/beam_search_decode_op.cc
+++ b/paddle/fluid/operators/beam_search_decode_op.cc
@@ -74,7 +74,7 @@ struct BeamSearchDecodeFunctor {
   }
 
   template <typename T>
-  void operator()() const;
+  void apply() const;
 
   bool tensor_on_gpu_;
   size_t beam_size_;
@@ -88,7 +88,7 @@ struct BeamSearchDecodeFunctor {
 };
 
 template <typename T>
-void BeamSearchDecodeFunctor::operator()() const {
+void BeamSearchDecodeFunctor::apply() const {
   BeamSearchDecoder<T> beam_search_decoder(beam_size_, end_id_);
   // Check if the tensor is on GPU.
If so, use the CPU copy instead if (tensor_on_gpu_) { @@ -101,7 +101,7 @@ void BeamSearchDecodeFunctor::operator()() const { } template <> -void BeamSearchDecodeFunctor::operator()() const { +void BeamSearchDecodeFunctor::apply() const { PADDLE_THROW("beam search decode op does not support bool!"); } diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index 6220e57f5941d89cbf0aea268b85ad55af6132cc..8fa0416049f8fa128d7ab61f8350b41960f07263 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -37,7 +37,7 @@ struct CastOpFunctor { : in_(in), out_(out), ctx_(ctx) {} template - void operator()() const { + void apply() const { auto* in_begin = in_->data(); auto numel = in_->numel(); auto* in_end = in_begin + numel; diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 22cbf680c0670552fb014043c69fcadc56863529..4a7a6bcf7154d5680de751e3c933be46fb09fd74 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -118,7 +118,6 @@ class CUDNNConvOpKernel : public framework::OpKernel { output_channels / groups * output_height * output_width * output_depth; int group_offset_filter = filter->numel() / groups; // ------------------- cudnn conv workspace --------------------- - void* cudnn_workspace = nullptr; size_t workspace_size_in_bytes; // final workspace to allocate. size_t workspace_size_limit = kCONV_CUDNN_WORKSPACE_LIMIT_BYTES; if (user_workspace_size > 0) { @@ -159,20 +158,18 @@ class CUDNNConvOpKernel : public framework::OpKernel { PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit, "workspace_size to be allocated exceeds the limit"); - // Allocate on GPU memory - platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); - cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv forward --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; for (int i = 0; i < groups; i++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( - handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, - cudnn_filter_desc, filter_data + i * group_offset_filter, - cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, - &beta, cudnn_output_desc, output_data + i * group_offset_out)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, + cudnn_filter_desc, filter_data + i * group_offset_filter, + cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, + &beta, cudnn_output_desc, output_data + i * group_offset_out)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } - // Release the cudnn workspace - paddle::memory::Free(gpu, cudnn_workspace); } }; @@ -314,11 +311,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { cudnn_filter_desc, filter_algo, &tmp_size)); workspace_size_in_bytes = std::max(workspace_size_in_bytes, tmp_size); } - // ------------------- cudnn conv workspace --------------------- - // Already on GPU - void* cudnn_workspace = nullptr; - platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); - cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); + // ------------------- cudnn conv backward data --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; if (input_grad) { @@ -326,12 +319,15 @@ class CUDNNConvGradOpKernel : public 
framework::OpKernel { // Because beta is zero, it is unnecessary to reset input_grad. for (int i = 0; i < groups; i++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, cudnn_filter_desc, - filter_data + i * group_offset_filter, cudnn_output_grad_desc, - output_grad_data + i * group_offset_out, cudnn_conv_desc, data_algo, - cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, - input_grad_data + i * group_offset_in)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + handle, &alpha, cudnn_filter_desc, + filter_data + i * group_offset_filter, cudnn_output_grad_desc, + output_grad_data + i * group_offset_out, cudnn_conv_desc, + data_algo, cudnn_workspace, workspace_size_in_bytes, &beta, + cudnn_input_desc, input_grad_data + i * group_offset_in)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } } // ------------------- cudnn conv backward filter --------------------- @@ -339,16 +335,17 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { T* filter_grad_data = filter_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. for (int i = 0; i < groups; i++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, - cudnn_output_grad_desc, output_grad_data + i * group_offset_out, - cudnn_conv_desc, filter_algo, cudnn_workspace, - workspace_size_in_bytes, &beta, cudnn_filter_desc, - filter_grad_data + i * group_offset_filter)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + handle, &alpha, cudnn_input_desc, + input_data + i * group_offset_in, cudnn_output_grad_desc, + output_grad_data + i * group_offset_out, cudnn_conv_desc, + filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta, + cudnn_filter_desc, filter_grad_data + i * group_offset_filter)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } } - // Release the cudnn workspace - paddle::memory::Free(gpu, cudnn_workspace); } }; diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc index 82fff68e7557b3f0b44e6faf2a50e5a0ecbba589..73831611d01b8c5b8d2d9f7f15634a0094e4a608 100644 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc @@ -76,7 +76,6 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { conv_desc.descriptor(paddings, strides, dilations); // ------------------- cudnn conv workspace --------------------- - void* cudnn_workspace = nullptr; size_t workspace_size_in_bytes; // final workspace to allocate. 
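// Editor's sketch (not part of the patch): with this change the kernels no
// longer Alloc/Free the cuDNN workspace themselves; they compute
// workspace_size_in_bytes and hand a lambda to the device context, which owns
// the allocation. Reduced to plain, self-contained C++, the pattern looks
// roughly like the following; the class name WorkspaceOwner and the
// std::vector-backed buffer are illustrative assumptions (the real
// RunCudnnFuncWithWorkspace presumably manages GPU memory instead):
//
//   #include <functional>
//   #include <vector>
//
//   class WorkspaceOwner {
//    public:
//     // Grow the cached buffer if needed, then lend it to the callback.
//     // The callee must not retain the pointer after returning.
//     void RunFuncWithWorkspace(const std::function<void(void*)>& fn,
//                               size_t bytes) {
//       if (buf_.size() < bytes) buf_.resize(bytes);
//       fn(buf_.data());
//     }
//
//    private:
//     std::vector<char> buf_;  // reused across calls instead of Alloc/Free
//   };
//
// This removes one allocate/free pair per kernel invocation, which is exactly
// what deleting the paddle::memory::Alloc / paddle::memory::Free calls buys.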
size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes; if (user_workspace_size > 0) { @@ -100,25 +99,21 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); - // Allocate on GPU memory - platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); - cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); - // ------------------- cudnn conv transpose forward --------------------- int input_offset = input->numel() / input->dims()[0] / groups; int output_offset = output->numel() / output->dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; for (int g = 0; g < groups; g++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, cudnn_filter_desc, filter_data + filter_offset * g, - cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, - algo, cudnn_workspace, workspace_size_in_bytes, &beta, - cudnn_output_desc, output_data + output_offset * g)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + handle, &alpha, cudnn_filter_desc, filter_data + filter_offset * g, + cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, + algo, cudnn_workspace, workspace_size_in_bytes, &beta, + cudnn_output_desc, output_data + output_offset * g)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } - - // Release the cudnn workspace - paddle::memory::Free(gpu, cudnn_workspace); } }; @@ -206,11 +201,6 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { std::max(workspace_size_in_bytes, bwd_filter_ws_size); } - // ------------------- cudnn conv workspace --------------------- - // Already on GPU - void* cudnn_workspace = nullptr; - platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); - cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv backward data --------------------- // FIXME(typhoonzero): template type T may not be the same as cudnn call. int input_offset = input->numel() / input->dims()[0] / groups; @@ -222,12 +212,15 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. for (int g = 0; g < groups; g++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( - handle, &alpha, cudnn_output_desc, - output_grad_data + output_grad_offset * g, cudnn_filter_desc, - filter_data + filter_offset * g, cudnn_conv_desc, data_algo, - cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, - input_grad_data + input_offset * g)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, cudnn_output_desc, + output_grad_data + output_grad_offset * g, cudnn_filter_desc, + filter_data + filter_offset * g, cudnn_conv_desc, data_algo, + cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, + input_grad_data + input_offset * g)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } } @@ -237,17 +230,17 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { // Because beta is zero, it is unnecessary to reset filter_grad. 
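// Editor's note: the backward-filter path below applies the same
// borrowed-workspace pattern (see the sketch earlier); the per-call
// Alloc/Free of cudnn_workspace disappears here as well.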
// Gradient with respect to the filter for (int g = 0; g < groups; g++) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, cudnn_output_desc, - output_grad_data + output_grad_offset * g, cudnn_input_desc, - input_data + input_offset * g, cudnn_conv_desc, filter_algo, - cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, - filter_grad_data + filter_offset * g)); + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + handle, &alpha, cudnn_output_desc, + output_grad_data + output_grad_offset * g, cudnn_input_desc, + input_data + input_offset * g, cudnn_conv_desc, filter_algo, + cudnn_workspace, workspace_size_in_bytes, &beta, + cudnn_filter_desc, filter_grad_data + filter_offset * g)); + }; + dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); } } - - // Release the cudnn workspace - paddle::memory::Free(gpu, cudnn_workspace); } }; diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index d29b0153389574de8992b93ac6795e91556af870..fcdcafae7273afa6887ee531dfc37ef833b92d68 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -33,7 +33,7 @@ struct AppendProposalsFunctor { : out_(out), offset_(offset), to_add_(to_add) {} template - void operator()() const { + void apply() const { auto *out_data = out_->data(); auto *to_add_data = to_add_->data(); memcpy(out_data + offset_, to_add_data, to_add_->numel() * sizeof(T)); diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index a91e0f520e93c01bc5af09b691af2d5a6deda9f2..e608eba05d5680254835f7b25f53d6a59e310e2a 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -14,86 +14,198 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/fake_quantize_op.h" #include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/operators/clip_op.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { -class FakeQuantizeOp : public framework::OperatorWithKernel { +template +using EigenVectorArrayMap = + Eigen::TensorMap>; + +template +using ConstEigenVectorArrayMap = + Eigen::TensorMap>; + +template +struct FindAbsMaxFunctor { + void operator()(const platform::CPUDeviceContext& ctx, const T* in, + const int num, T* out) { + Eigen::DSizes idim(num); + Eigen::DSizes odim(1); + Eigen::TensorMap> in_e(in, idim); + Eigen::TensorMap> out_e(out, odim); + + out_e = in_e.abs().maximum(); + } +}; + +template struct FindAbsMaxFunctor; + +template +struct ClipAndFakeQuantFunctor { + void operator()(const platform::CPUDeviceContext& ctx, + const framework::Tensor& in, const framework::Tensor& scale, + const int bin_cnt, framework::Tensor* out) { + T s = scale.data()[0]; + platform::Transform trans; + trans(ctx, in.data(), in.data() + in.numel(), + out->mutable_data(ctx.GetPlace()), ClipFunctor(-s, s)); + auto in_e = framework::EigenVector::Flatten(in); + auto out_e = framework::EigenVector::Flatten(*out); + + out_e.device(*ctx.eigen_device()) = (bin_cnt / s * in_e).round(); + } +}; + +template struct ClipAndFakeQuantFunctor; + +template +struct FindRangeAbsMaxFunctor { + void operator()(const platform::CPUDeviceContext& ctx, + const framework::Tensor& cur_scale, + const framework::Tensor& last_scale, + const framework::Tensor& iter, const int window_size, + framework::Tensor* scales_arr, framework::Tensor* out_scale) { + T* scale_arr = scales_arr->mutable_data(ctx.GetPlace()); + int64_t it = iter.data()[0]; + int idx = it % window_size; + T removed = scale_arr[idx]; + T cur = cur_scale.data()[0]; + scale_arr[idx] = cur; + + T max = last_scale.data()[0]; + if (max < cur) { + max = cur; + } else if (fabs(removed - max) < 1e-6) { + int size = (it > window_size) ? 
window_size : it; + FindAbsMaxFunctor()(ctx, scale_arr, size, + &max); + } + out_scale->mutable_data(ctx.GetPlace())[0] = max; + } +}; + +template struct FindRangeAbsMaxFunctor; + +class FakeQuantizeAbsMaxOp : public framework::OperatorWithKernel { public: - FakeQuantizeOp(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) + FakeQuantizeAbsMaxOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} - void InferShape(framework::InferShapeContext *ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FakeQuantizeOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of FakeQuantizeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("OutMovingScale"), - "OutMovingScale(Out) of FakeQuantizeOp should not be null"); - // if (ctx->HasInput("InMovingScale")) { - ctx->SetOutputDim("OutMovingScale", ctx->GetInputDim("InMovingScale")); - //} - // if (ctx->HasInput("InScales")) { - PADDLE_ENFORCE(ctx->HasOutput("OutScales"), - "OutScales(Out) of FakeQuantizeOp should not be null"); - ctx->SetOutputDim("OutScales", ctx->GetInputDim("InScales")); - // PADDLE_ENFORCE_EQ(ctx->Inputs("InScales")[0], - // ctx->Outputs("OutScales")[0], - // "Mean and MeanOut should share the same memory"); - //} + PADDLE_ENFORCE(ctx->HasOutput("OutScale"), + "Output(Scale) of FakeQuantizeOp should not be null."); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->SetOutputDim("OutScale", {1}); ctx->ShareLoD("X", /*->*/ "Out"); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } }; -class FakeQuantizeOpMaker : public framework::OpProtoAndCheckerMaker { +class FakeQuantizeAbsMaxOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "(Tensor) Input tensor of scale operator."); - AddInput("InScales", "(Tensor) scale buffer, used in static quantization.") - .AsDispensable(); - AddInput("InMovingScale", "Last scale, used in static quantization.") - .AsDispensable(); - AddInput("InCurrentIter", - "Last iteration number, used in static quantization.") - .AsDispensable(); - AddOutput("Out", "(Tensor) Output of quantized low level tensor."); - AddOutput("OutScales", - "(Tensor) scale buffer, used in static quantization.") - .AsDispensable(); - AddOutput("OutMovingScale", " Current scale"); - AddOutput("OutCurrentIter", "Current iteration number.").AsDispensable(); - AddAttr("quantize_type", - "(string, default abs_max)" - "The scaling tpe of the quantize operator.") - .SetDefault("abs_max"); - AddAttr("window_size", "(int, default 10000)").SetDefault(10000); + AddInput("X", "(Tensor) Input is float data type."); + AddOutput("Out", + "(Tensor) Output of quantized low level tensor, " + "but also saved as float data type."); + AddOutput("OutScale", "(Tensor) Current scale"); AddAttr("bit_length", "(int, default 8)") .SetDefault(8) - .AddCustomChecker([](const int &bit_length) { + .AddCustomChecker([](const int& bit_length) { PADDLE_ENFORCE(bit_length >= 1 && bit_length <= 16, "'bit_length' should be between 1 and 16."); }); - 
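// Editor's note (worked example, not part of the patch): for the default
// bit_length = 8, bin_cnt = 2^(8-1) - 1 = 127. With scale = max(abs(X)), an
// input value x maps to round(x / scale * 127), so X = {-1.0, 0.25, 1.0} with
// scale = 1.0 becomes {-127, 32, 127}. The output tensor stays float, which is
// why this is "fake" quantization.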
AddAttr("is_test", "").SetDefault(false); AddComment(R"DOC( FakeQuantize operator -quantize_type = abs_max: +$$scale = max(abs(X))$$ +$$range = 2^{bit_length - 1} - 1$$ +$$Out = round(X/scale * range)$$ - $$scale = max(abs(x))$$ +)DOC"); + } +}; -quantize_type = range_abs_max: +class FakeQuantizeRangeAbsMaxOp : public framework::OperatorWithKernel { + public: + FakeQuantizeRangeAbsMaxOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} - $$scale = max(max(abs(x)), history_abs_max)$$ + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of FakeQuantizeRangeAbsMaxOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("Out"), + "Output(Out) of FakeQuantizeRangeAbsMaxOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("OutScale"), + "Output(OutScale) of FakeQuantizeRangeAbsMaxOp should not be null"); + if (ctx->HasOutput("OutScales")) { + int window_size = ctx->Attrs().Get("window_size"); + ctx->SetOutputDim("OutScales", {window_size}); + } + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->SetOutputDim("OutScale", {1}); + ctx->ShareLoD("X", /*->*/ "Out"); + } -quantize_type = moving_average_abs_max: + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; - $$scale = 0.1*scale+0.9*new_abs_max)$$ +class FakeQuantizeRangeAbsMaxOpMaker + : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor) Input is float data type."); + AddInput("InScale", "Last scale."); + AddInput("Iter", "Global step iteration.").AsDispensable(); + AddOutput("Out", "(Tensor) Output of quantized low level tensor."); + AddOutput("OutScale", " Current scale"); + AddOutput("OutScales", "(Tensor) scale buffer.").AsDispensable(); + AddAttr("window_size", "(int, default 10000) window range size.") + .SetDefault(10000); + AddAttr("bit_length", "(int, default 8), quantization bit number.") + .SetDefault(8) + .AddCustomChecker([](const int& bit_length) { + PADDLE_ENFORCE(bit_length >= 1 && bit_length <= 16, + "'bit_length' should be between 1 and 16."); + }); + AddAttr("is_test", "").SetDefault(false); + AddComment(R"DOC( +FakeQuantize operator is used in static quantization. 
-$$Out = scale*X$$ +$$scale = max(max(abs(x)), history_abs_max)$$ +$$range = 2^{bit_length - 1} - 1$$ +$$Out = round(X/scale * range)$$ )DOC"); } @@ -103,10 +215,16 @@ $$Out = scale*X$$ } // namespace paddle namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; + +REGISTER_OPERATOR(fake_quantize_abs_max, ops::FakeQuantizeAbsMaxOp, + ops::FakeQuantizeAbsMaxOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(fake_quantize_abs_max, + ops::FakeQuantizeAbsMaxKernel); -REGISTER_OPERATOR(fake_quantize, ops::FakeQuantizeOp, ops::FakeQuantizeOpMaker, +REGISTER_OPERATOR(fake_quantize_range_abs_max, ops::FakeQuantizeRangeAbsMaxOp, + ops::FakeQuantizeRangeAbsMaxOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - fake_quantize, - ops::FakeQuantizeKernel, - ops::FakeQuantizeKernel); +REGISTER_OP_CPU_KERNEL(fake_quantize_range_abs_max, + ops::FakeQuantizeRangeAbsMaxKernel); diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu index be0c6730a5119090600a27c66510b2a095c54583..7c65d6dba7d67b5d31720bae1f4877dd22210138 100644 --- a/paddle/fluid/operators/fake_quantize_op.cu +++ b/paddle/fluid/operators/fake_quantize_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include +#include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/fake_quantize_op.h" #include "paddle/fluid/platform/cuda_primitives.h" @@ -20,7 +21,7 @@ namespace paddle { namespace operators { template -__global__ void FindAbsMaxKernel(const int n, const T* in, T* out) { +__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) { int bid = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; @@ -43,7 +44,7 @@ __global__ void FindAbsMaxKernel(const int n, const T* in, T* out) { __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { - if (tid < i && shared_max_data[tid] < shared_max_data[tid + i]) { + if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) { shared_max_data[tid] = shared_max_data[tid + i]; } __syncthreads(); @@ -53,220 +54,124 @@ __global__ void FindAbsMaxKernel(const int n, const T* in, T* out) { } } -float FindAbsMaxGpu(const platform::CUDADeviceContext& ctx, const float* array, - int length) { - float host_max; - int kNumTheads = 1024; - int gridDimx = (kNumTheads - 1 + length) / kNumTheads; - gridDimx = (gridDimx > kNumTheads) ? kNumTheads : gridDimx; - framework::Tensor t; - float* device_max = t.mutable_data(framework::make_ddim({gridDimx}), - platform::CUDAPlace()); - FindAbsMaxKernel<<>>(length, array, device_max); - FindAbsMaxKernel< - float><<<1, kNumTheads, kNumTheads * sizeof(float), ctx.stream()>>>( - gridDimx, device_max, device_max); - PADDLE_ENFORCE_EQ( - cudaMemcpy(&host_max, device_max, sizeof(float), cudaMemcpyDeviceToHost), - cudaSuccess, "cudaMemcpy failed"); - return host_max; -} +template +struct FindAbsMaxFunctor { + void operator()(const platform::CUDADeviceContext& ctx, const T* in, + const int num, T* out) { + int block = 1024; + int grid = (block - 1 + num) / block; + grid = (grid > block) ? 
block : grid; + + framework::Tensor max; + T* max_data = + max.mutable_data(framework::make_ddim({grid}), ctx.GetPlace()); + FindAbsMaxKernel<<>>( + in, num, max_data); + FindAbsMaxKernel<<<1, block, 1024 * sizeof(T), ctx.stream()>>>( + max_data, grid, out); + } +}; + +template struct FindAbsMaxFunctor; template -__global__ void ApplySaturateKernel(const int n, const T* in, T* out, - int* num_saturate, const T min, - const T max) { +__global__ void ClipAndQuantKernel(const T* in, const T* scale, + const int bin_cnt, const int n, T* out) { int bid = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; - extern __shared__ int shared_count[]; - shared_count[tid] = 0; + T s = scale[0]; for (int i = bid; i < n; i += blockDim.x * gridDim.x) { - if (in[i] > max) { - out[i] = max; - shared_count[tid] += 1; - } else if (in[i] < min) { - out[i] = min; - shared_count[tid] += 1; - } else { - out[i] = in[i]; - } - } - __syncthreads(); - - for (int i = blockDim.x / 2; i > 0; i >>= 1) { - if (tid < i) { - shared_count[tid] += shared_count[tid + i]; - } - __syncthreads(); - } - if (tid == 0) { - num_saturate[blockIdx.x] = shared_count[0]; + T x = in[bid]; + T v = x > s ? s : x; + v = v < -s ? -s : v; + v = bin_cnt / s * v; + out[bid] = round(v); } } template -__global__ void ReduceKernel(const int n, const T* in, T* out) { - int tid = threadIdx.x; - extern __shared__ T shared_sum[]; - if (tid < n) { - shared_sum[tid] = in[tid]; +__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale, + const T* last_scale, + const int64_t* iter, + const int window_size, T* scale_arr, + T* out_scale, int* need_find_max, + int* out_size) { + int it = iter[0]; + int idx = it % window_size; + T removed = scale_arr[idx]; + T cur = cur_scale[0]; + scale_arr[idx] = cur; + T max = last_scale[0]; + out_scale[0] = max < cur ? cur : max; + if (fabs(removed - max) < 1e-6) { + need_find_max[0] = 1; + out_size[0] = it > window_size ? window_size : it; } else { - shared_sum[tid] = T(0); - } - __syncthreads(); - // blockDim.x must >= n - for (int i = (n + 1) / 2; i > 0; i >>= 1) { - if (tid < i) { - shared_sum[tid] += shared_sum[tid + i]; - } - __syncthreads(); - } - if (tid == 0) { - out[0] = shared_sum[0]; + need_find_max[0] = 0; } } template -int ApplySaturateGpu(const platform::CUDADeviceContext& ctx, const int n, - const T* in, T* out, const T min, const T max) { - int host_num_saturate; - int kNumTheads = 1024; - int gridDimx = (n + kNumTheads - 1) / kNumTheads; - gridDimx = (gridDimx > kNumTheads) ? 
kNumTheads : gridDimx; - framework::Tensor t; - int* device_num_saturate = t.mutable_data( - framework::make_ddim({gridDimx}), platform::CUDAPlace()); - ApplySaturateKernel< - T><<>>( - n, in, out, device_num_saturate, min, max); - ReduceKernel<<<1, kNumTheads, kNumTheads * sizeof(T), ctx.stream()>>>( - gridDimx, device_num_saturate, device_num_saturate); - PADDLE_ENFORCE_EQ(cudaSuccess, - cudaMemcpy(&host_num_saturate, device_num_saturate, - sizeof(int), cudaMemcpyDeviceToHost), - "cudaMemcpy failed"); - return host_num_saturate; -} - -template -class FakeQuantizeCUDAKernel : public framework::OpKernel { - public: - T FindRangeAbsMax(const platform::CUDADeviceContext& ctx, - framework::Tensor* scale_list, framework::Tensor* out_scale, - const T& cur_scale, int window_size, - int current_iter) const { - T* sl = scale_list->mutable_data(platform::CPUPlace()); - T remove_tmp = sl[current_iter]; - sl[current_iter] = cur_scale; - T& max_scale = out_scale->mutable_data(platform::CPUPlace())[0]; - if (max_scale < cur_scale) { - max_scale = cur_scale; - } else if (fabs(remove_tmp - max_scale) < 1e-6) { - int size = (current_iter > window_size) ? window_size : current_iter; - max_scale = T(FindAbsMaxGpu(ctx, scale_list->data(), size)); +struct FindRangeAbsMaxFunctor { + void operator()(const platform::CUDADeviceContext& ctx, + const framework::Tensor& cur_scale, + const framework::Tensor& last_scale, + const framework::Tensor& iter, const int window_size, + framework::Tensor* scales_arr, framework::Tensor* out_scale) { + auto& gpu_place = boost::get(ctx.GetPlace()); + T* scale_arr = scales_arr->mutable_data(gpu_place); + T* out_scale_data = out_scale->mutable_data(gpu_place); + + framework::Tensor need_find_max, out_size; + int* find_max = need_find_max.mutable_data(gpu_place); + int* out_size_data = out_size.mutable_data(gpu_place); + + FindRangeAbsMaxAndFillArray<<<1, 1, 0, ctx.stream()>>>( + cur_scale.data(), last_scale.data(), iter.data(), + window_size, scale_arr, out_scale_data, find_max, out_size_data); + + int g_find_max; + memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max, + sizeof(int), 0); + if (g_find_max) { + int len; + memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data, + sizeof(int), 0); + FindAbsMaxFunctor()(ctx, scale_arr, len, + out_scale_data); } - return max_scale; - } - - T FindMovingAverageAbsMmax(framework::Tensor* in_scale, - framework::Tensor* out_scale, - const T& cur_scale) const { - T* ins = in_scale->mutable_data(platform::CPUPlace()); - T* outs = out_scale->mutable_data(platform::CPUPlace()); - outs[0] = 0.9 * cur_scale + 0.1 * ins[0]; - return T(outs[0]); } +}; - virtual void Compute(const framework::ExecutionContext& context) const { - PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), - "This kernel only runs on GPU device."); - auto& device_ctx = context.cuda_device_context(); - auto* tensor = context.Output("Out"); - auto* in = context.Input("X"); - const bool is_test = context.Attr("is_test"); - tensor->mutable_data(in->place()); - context.Output("OutMovingScale") - ->mutable_data( - context.Input("InMovingScale")->place()); - auto quantize_type = - static_cast(context.Attr("quantize_type")); - if (quantize_type == std::string("range_abs_max")) { - context.Output("OutScales") - ->mutable_data( - context.Input("InScales")->place()); - context.Output("OutCurrentIter") - ->mutable_data( - context.Input("InCurrentIter")->place()); - } - - T scale = T(1); - int window_size = context.Attr("window_size"); - T bin_cnt = (T)((1 << 
(context.Attr("bit_length") - 1)) - 1); - if (quantize_type == std::string("abs_max")) { - auto* saving_scale = context.Output("OutMovingScale"); - scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); - saving_scale->mutable_data(platform::CPUPlace())[0] = scale; - - auto& device_ctx = context.template device_context(); - auto* scale_list = context.Output("OutScales"); - math::SetConstant scalar; - scale_list->mutable_data(context.GetPlace()); - scalar(device_ctx, scale_list, static_cast(0)); - auto* iter = context.Output("OutCurrentIter"); - iter->mutable_data(context.GetPlace()); - scalar(device_ctx, iter, static_cast(0)); - } else if (quantize_type == std::string("range_abs_max")) { - auto* moving_scale = const_cast( - context.Input("InMovingScale")); - if (is_test) { - scale = moving_scale->mutable_data(platform::CPUPlace())[0]; - } else { - auto* it = const_cast( - context.Input("InCurrentIter")); - auto* iter = context.Output("OutCurrentIter"); - int* last_iter = it->mutable_data(platform::CPUPlace()); - int* current_iter = iter->mutable_data(platform::CPUPlace()); - auto* scale_list = context.Output("OutScales"); - auto* saving_scale = - context.Output("OutMovingScale"); - scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); - scale = FindRangeAbsMax(device_ctx, scale_list, saving_scale, scale, - window_size, current_iter[0]); - (*current_iter) = (*last_iter) + 1; - } - } else if (quantize_type == std::string("moving_average_abs_max")) { - auto* moving_scale = const_cast( - context.Input("InMovingScale")); - if (is_test) { - scale = moving_scale->mutable_data(platform::CPUPlace())[0]; - } else { - scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); - auto* saving_scale = - context.Output("OutMovingScale"); - scale = FindMovingAverageAbsMmax( - const_cast(moving_scale), saving_scale, scale); - } - } - - ApplySaturateGpu(device_ctx, in->numel(), in->data(), - tensor->mutable_data(in->place()), -scale, scale); - scale = bin_cnt / scale; +template struct FindRangeAbsMaxFunctor; - auto& dev = - *context.template device_context().eigen_device(); - auto eigen_out = framework::EigenVector::Flatten(*tensor); - auto eigen_in = framework::EigenVector::Flatten(*tensor); - eigen_out.device(dev) = (scale * eigen_in).round(); +template +struct ClipAndFakeQuantFunctor { + void operator()(const platform::CUDADeviceContext& ctx, + const framework::Tensor& in, const framework::Tensor& scale, + const int bin_cnt, framework::Tensor* out) { + int num = in.numel(); + int block = 1024; + int grid = (block - 1 + num) / block; + + const T* in_data = in.data(); + const T* scale_data = scale.data(); + T* out_data = out->mutable_data(ctx.GetPlace()); + + ClipAndQuantKernel<<>>( + in_data, scale_data, bin_cnt, num, out_data); } }; +template struct ClipAndFakeQuantFunctor; + } // namespace operators } // namespace paddle -REGISTER_OP_CUDA_KERNEL(fake_quantize, - paddle::operators::FakeQuantizeCUDAKernel< - paddle::platform::CUDADeviceContext, float>, - paddle::operators::FakeQuantizeCUDAKernel< - paddle::platform::CUDADeviceContext, double>); +namespace ops = paddle::operators; +using CUDA = paddle::platform::CUDADeviceContext; +REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max, + ops::FakeQuantizeAbsMaxKernel); +REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max, + ops::FakeQuantizeRangeAbsMaxKernel); diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h index 
80f71d85dde39f773cc489fb79effcc775c5010a..7ace7573ec5c03ab8788cfc0aab614b7f80ea073 100644 --- a/paddle/fluid/operators/fake_quantize_op.h +++ b/paddle/fluid/operators/fake_quantize_op.h @@ -17,137 +17,91 @@ limitations under the License. */ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/clip_op.h" #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { -using platform::Transform; +template +struct FindAbsMaxFunctor { + void operator()(const DeviceContext& ctx, const T* in, const int num, T* out); +}; template -class FakeQuantizeKernel : public framework::OpKernel { +struct ClipAndFakeQuantFunctor { + void operator()(const DeviceContext& ctx, const framework::Tensor& in, + const framework::Tensor& scale, const int bin_cnt, + framework::Tensor* out); +}; + +template +struct FindRangeAbsMaxFunctor { + void operator()(const DeviceContext& ctx, const framework::Tensor& cur_scale, + const framework::Tensor& last_scale, + const framework::Tensor& iter, const int window_size, + framework::Tensor* scales_arr, framework::Tensor* out_scale); +}; + +template +class FakeQuantizeAbsMaxKernel : public framework::OpKernel { public: - T FindAbsMax(framework::Tensor* in, int n) const { - T* p = in->mutable_data(platform::CPUPlace()); - T abs_max = (T)0.00000001; - for (int i = 0; i < n; i++) { - T tmp = fabs(p[i]); - if (tmp > abs_max) abs_max = tmp; - } - return T(abs_max); - } - T FindRangeAbsMax(framework::Tensor* scale_list, framework::Tensor* out_scale, - const T& cur_scale, int window_size, - int current_iter) const { - T* sl = scale_list->mutable_data(platform::CPUPlace()); - T remove_tmp = sl[current_iter]; - sl[current_iter] = cur_scale; - T& max_scale = out_scale->mutable_data(platform::CPUPlace())[0]; - if (max_scale < cur_scale) { - max_scale = cur_scale; - } else if (fabs(remove_tmp - max_scale) < 1e-6) { - int size = (current_iter > window_size) ? 
window_size : current_iter; - max_scale = T(FindAbsMax(scale_list, size)); - } - return max_scale; - } + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); - T FindMovingAverageAbsMmax(framework::Tensor* in_scale, - framework::Tensor* out_scale, - const T& cur_scale) const { - T* ins = in_scale->mutable_data(platform::CPUPlace()); - T* outs = out_scale->mutable_data(platform::CPUPlace()); - outs[0] = 0.9 * cur_scale + 0.1 * ins[0]; - return T(outs[0]); + auto* out = context.Output("Out"); + auto* out_scale = context.Output("OutScale"); + T* out_s = out_scale->mutable_data(context.GetPlace()); + + int bit_length = context.Attr("bit_length"); + int bin_cnt = std::pow(2, bit_length - 1) - 1; + + auto& dev_ctx = context.template device_context(); + const T* in_data = in->data(); + FindAbsMaxFunctor()(dev_ctx, in_data, in->numel(), out_s); + ClipAndFakeQuantFunctor()(dev_ctx, *in, *out_scale, + bin_cnt, out); } +}; - virtual void Compute(const framework::ExecutionContext& context) const { - auto* tensor = context.Output("Out"); +template +class FakeQuantizeRangeAbsMaxKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); - const bool is_test = context.Attr("is_test"); - tensor->mutable_data(in->place()); - - auto* oms_tensor = context.Output("OutMovingScale"); - oms_tensor->mutable_data(in->place()); - - auto quantize_type = - static_cast(context.Attr("quantize_type")); - if (quantize_type == std::string("range_abs_max")) { - auto* oss_tensor = context.Output("OutScales"); - oss_tensor->mutable_data( - context.Input("InScales")->place()); - auto* oci_tensor = context.Output("OutCurrentIter"); - oci_tensor->mutable_data( - context.Input("InCurrentIter")->place()); - } + auto* in_scale = context.Input("InScale"); - T scale = static_cast(1); - int window_size = context.Attr("window_size"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + + bool is_test = context.Attr("is_test"); int bit_length = context.Attr("bit_length"); int bin_cnt = std::pow(2, bit_length - 1) - 1; + auto& dev_ctx = context.template device_context(); - auto& dev = - *context.template device_context().eigen_device(); - auto raw_in = framework::EigenVector::Flatten(*in); - if (quantize_type == std::string("abs_max")) { - auto* saving_scale = context.Output("OutMovingScale"); - auto scale_out = framework::EigenVector::Flatten(*saving_scale); - scale_out.device(dev) = raw_in.abs().maximum(); - scale = scale_out(0); - - auto& device_ctx = context.template device_context(); - auto* scale_list = context.Output("OutScales"); - math::SetConstant scalar; - scale_list->mutable_data(context.GetPlace()); - scalar(device_ctx, scale_list, static_cast(0)); - auto* iter = context.Output("OutCurrentIter"); - iter->mutable_data(context.GetPlace()); - scalar(device_ctx, iter, static_cast(0)); - } else if (quantize_type == std::string("range_abs_max")) { - auto* moving_scale = context.Input("InMovingScale"); - if (is_test) { - scale = moving_scale->data()[0]; - } else { - auto* it = context.Input("InCurrentIter"); - auto* iter = context.Output("OutCurrentIter"); - const int* last_iter = it->data(); - int* current_iter = iter->mutable_data(platform::CPUPlace()); - auto* scale_list = context.Output("OutScales"); - auto* saving_scale = - context.Output("OutMovingScale"); - auto scale_out = framework::EigenVector::Flatten(*saving_scale); - scale_out.device(dev) = 
raw_in.abs().maximum(); - scale = saving_scale->mutable_data(platform::CPUPlace())[0]; - scale = FindRangeAbsMax(scale_list, saving_scale, scale, window_size, - current_iter[0]); - saving_scale->mutable_data(platform::CPUPlace())[0] = scale; - (*current_iter) = (*last_iter) + 1; - } - } else if (quantize_type == std::string("moving_average_abs_max")) { - auto* moving_scale = context.Input("InMovingScale"); - if (is_test) { - scale = moving_scale->data()[0]; - } else { - auto* saving_scale = - context.Output("OutMovingScale"); - auto scale_out = framework::EigenVector::Flatten(*saving_scale); - scale_out.device(dev) = raw_in.abs().maximum(); - scale = saving_scale->mutable_data(platform::CPUPlace())[0]; - scale = FindMovingAverageAbsMmax( - const_cast(moving_scale), saving_scale, scale); - saving_scale->mutable_data(platform::CPUPlace())[0] = scale; - } + // testing + if (is_test) { + ClipAndFakeQuantFunctor()(dev_ctx, *in, *in_scale, + bin_cnt, out); + return; } - Transform trans; - trans(context.template device_context(), in->data(), - in->data() + in->numel(), tensor->mutable_data(in->place()), - ClipFunctor(-scale, scale)); - auto eigen_out = framework::EigenVector::Flatten(*tensor); - auto eigen_in = framework::EigenVector::Flatten(*tensor); - eigen_out.device(dev) = (bin_cnt / scale * eigen_in).round(); + // training + auto* out_scale = context.Output("OutScale"); + auto* out_scales = context.Output("OutScales"); + auto* iter = context.Input("Iter"); + + int window_size = context.Attr("window_size"); + out_scale->mutable_data(context.GetPlace()); + + framework::Tensor cur_scale; + T* cur_scale_data = cur_scale.mutable_data({1}, context.GetPlace()); + FindAbsMaxFunctor()(dev_ctx, in->data(), in->numel(), + cur_scale_data); + FindRangeAbsMaxFunctor()(dev_ctx, cur_scale, *in_scale, + *iter, window_size, out_scales, + out_scale); + ClipAndFakeQuantFunctor()(dev_ctx, *in, *out_scale, + bin_cnt, out); } }; diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index 925dc19061e2196a40411f415eb6e5ad59ab52ff..adc7cb1f9e48ba5fabeb91c5e3ecec016db34a45 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -25,7 +25,7 @@ struct FillOpVisitor { : tensor_(tensor), value_(value) {} template - void operator()() const { + void apply() const { platform::CPUPlace cpu; auto *data = tensor_->mutable_data(cpu); std::transform(value_.data(), value_.data() + tensor_->numel(), data, diff --git a/paddle/fluid/operators/fusion_gru_op.cc b/paddle/fluid/operators/fusion_gru_op.cc index 3a34aa86b6331e4fe2813eea97cb6644323807c3..582c75872ab2818cdf834f9a46278db1d6f91d54 100644 --- a/paddle/fluid/operators/fusion_gru_op.cc +++ b/paddle/fluid/operators/fusion_gru_op.cc @@ -13,16 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/fusion_gru_op.h" +#include // for memcpy #include -#include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/operators/math/detail/activation_functions.h" -#include "paddle/fluid/operators/math/detail/gru_cpu_kernel.h" -#include "paddle/fluid/operators/math/detail/gru_kernel.h" +#include "paddle/fluid/operators/math/cpu_vec.h" #include "paddle/fluid/operators/math/fc_compute.h" -#include "paddle/fluid/operators/math/gru_compute.h" -#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence2batch.h" +#include "paddle/fluid/platform/cpu_info.h" namespace paddle { namespace operators { @@ -35,12 +32,12 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { "Input(WeightH) of GRU should not be null."); PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("BatchedGate"), - "Output(BatchedGate) of GRU should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("BatchResetHiddenPrev"), - "Output(BatchResetHiddenPrev) of GRU should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"), - "Output(BatchedHidden) of GRU should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"), + "Output(ReorderedH0) of GRU should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"), + "Output(BatchedInput) of GRU should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"), + "Output(BatchedOut) of GRU should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Hidden"), "Output(Hidden) of GRU should not be null."); @@ -83,12 +80,16 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { } framework::DDim out_dims({x_dims[0], frame_size}); ctx->SetOutputDim("Hidden", out_dims); - ctx->SetOutputDim("BatchedGate", {x_dims[0], wx_dims[1]}); - ctx->SetOutputDim("BatchedHidden", out_dims); - ctx->SetOutputDim("BatchResetHiddenPrev", out_dims); + ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]}); + ctx->SetOutputDim("BatchedOut", out_dims); ctx->ShareLoD("X", "Hidden"); - int xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1]; + int xx_width; + if (ctx->Attrs().Get("use_seq")) { + xx_width = wx_dims[1]; + } else { + xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1]; + } ctx->SetOutputDim("XX", {x_dims[0], xx_width}); ctx->ShareLoD("X", "XX"); } @@ -115,22 +116,29 @@ void FusionGRUOpMaker::Make() { "(Tensor) The FC weight with shape (M x 3D)," "where M is the dim size of x, D is the hidden size. "); AddInput("WeightH", - "(Tensor) (D x 3D) Same as GRUOp, where D is the hidden size. "); + "(Tensor) (D x 3D) Same as GRUOp, where D is the hidden size. " + "This weight is not exactly D x 3D as: {W_update, W_reset, W_state}" + "Acutally they are D x 2D and D x D two part weights." + "{W_update, W_reset; W_state}" + "{D x (D + D); D x D}"); AddInput("Bias", "(Tensor, optional) (1 x 3D)." "Almost same as GRUOp." 
"Note: if have FC bias it should be added on this bias.") .AsDispensable(); + AddOutput("ReorderedH0", "(Tensor) (N x D), which N is the min-batch size.") + .AsIntermediate(); AddOutput("XX", - "(LoDTensor) the result after X * WeightX (size is T x 4D)" + "(LoDTensor) the result after X * WeightX (size is T x 3D)" " or batched_X (size is T x M), this will be automatically chosen," " where T is the total time steps in this mini-batch," " D is the hidden size, M is the dim size of x input.") .AsIntermediate(); - AddOutput("BatchedGate", "(LoDTensor) Same as GRUOp").AsIntermediate(); - AddOutput("BatchResetHiddenPrev", "(LoDTensor) (T x 3D) Same as GRUOp.") + AddOutput("BatchedInput", + "(LoDTensor) This is the batched result of input X" + "or the batched result after fc, shape (T x 3D)") .AsIntermediate(); - AddOutput("BatchedHidden", "(LoDTensor) (T X D) Same as GRUOp.") + AddOutput("BatchedOut", "(LoDTensor) (T X D) save batched hidden.") .AsIntermediate(); AddOutput("Hidden", "(LoDTensor) (T x D) Same as GRUOp"); AddAttr("activation", @@ -146,6 +154,10 @@ void FusionGRUOpMaker::Make() { "(bool, defalut: False) " "whether to compute reversed GRU.") .SetDefault(false); + AddAttr("use_seq", + "(bool, defalut: True) " + "whether to use seq mode to compute GRU.") + .SetDefault(true); AddComment(R"DOC( The Fusion complete GRU Operator. This operator fuse the fully-connected operator into GRU, @@ -153,172 +165,261 @@ more details can refer to GRU op. )DOC"); } -template -inline void ReorderInitState(const DeviceContext& ctx, - const framework::Tensor& src, - framework::Vector index_lod, - framework::Tensor* dst, bool indexed_src) { - math::CopyMatrixRowsFunctor row_shuffle; - dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, dst, indexed_src); -} - -template +template class FusionGRUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { + if (ctx.Attr("use_seq")) { + SeqCompute(ctx); + } else { + BatchCompute(ctx); + } + } + +#define INIT_VEC_FUNC \ + std::function act_gate, act_state; \ + std::function cross; \ + auto& act_gate_str = ctx.Attr("gate_activation"); \ + auto& act_state_str = ctx.Attr("activation"); \ + if (platform::jit::MayIUse(platform::jit::avx)) { \ + math::VecActivations act_functor; \ + act_gate = act_functor(act_gate_str); \ + act_state = act_functor(act_state_str); \ + cross = math::vec_cross; \ + } else { \ + math::VecActivations act_functor; \ + act_gate = act_functor(act_gate_str); \ + act_state = act_functor(act_state_str); \ + cross = math::vec_cross; \ + } + +#define INIT_BASE_INPUT_OUTPUT \ + auto* h0 = ctx.Input("H0"); \ + auto* wx = ctx.Input("WeightX"); \ + auto* wh = ctx.Input("WeightH"); \ + auto* bias = ctx.Input("Bias"); \ + auto* xx = ctx.Output("XX"); \ + auto* hidden_out = ctx.Output("Hidden"); \ + bool is_reverse = ctx.Attr("is_reverse"); + +#define INIT_BASE_SIZES \ + auto x_dims = x->dims(); /* T x M*/ \ + auto wh_dims = wh->dims(); /* D x 3D*/ \ + const int total_T = x_dims[0]; \ + const int M = x_dims[1]; \ + const int D = wh_dims[0]; \ + const int D3 = wh_dims[1]; \ + const int D2 = D * 2; + + void SeqCompute(const framework::ExecutionContext& ctx) const { + using DeviceContext = paddle::platform::CPUDeviceContext; auto* x = ctx.Input("X"); - auto* wx = ctx.Input("WeightX"); - auto* wh = ctx.Input("WeightH"); - auto* bias = ctx.Input("Bias"); - auto* h0 = ctx.Input("H0"); - - auto* xx = ctx.Output("XX"); - auto* batched_gate = ctx.Output("BatchedGate"); - 
auto* batch_reset_hidden_prev = - ctx.Output("BatchResetHiddenPrev"); - auto* batch_hidden = ctx.Output("BatchedHidden"); - auto* hidden_out = ctx.Output("Hidden"); - bool is_reverse = ctx.Attr("is_reverse"); + INIT_BASE_INPUT_OUTPUT + INIT_BASE_SIZES + INIT_VEC_FUNC + auto x_lod = x->lod(); + const int N = x_lod[0].size() - 1; + const T* x_data = x->data(); + const T* h0_data = h0 ? h0->data() : nullptr; + const T* wx_data = wx->data(); + const T* wh_data = wh->data(); + const T* wh_state_data = wh_data + D * D2; T* xx_data = xx->mutable_data(ctx.GetPlace()); - T* batched_gate_data = batched_gate->mutable_data(ctx.GetPlace()); - batch_reset_hidden_prev->mutable_data(ctx.GetPlace()); - batch_hidden->mutable_data(ctx.GetPlace()); - hidden_out->mutable_data(ctx.GetPlace()); + T* hidden_out_data = hidden_out->mutable_data(ctx.GetPlace()); + + auto blas = math::GetBlas(ctx); + math::FCCompute(blas, total_T, D3, M, x_data, wx_data, + xx_data, + bias ? bias->data() : nullptr); + + int xx_offset = D3; + int gate_offset = D; + if (is_reverse) { + const int offset = (total_T - 1) * D; + xx_data = xx_data + offset * 3; + hidden_out_data = hidden_out_data + offset; + xx_offset = -D3; + gate_offset = -D; + } + auto move_step = [&]() { + xx_data = xx_data + xx_offset; + hidden_out_data = hidden_out_data + gate_offset; + }; + for (int i = 0; i < N; ++i) { + int bid = is_reverse ? N - 1 - i : i; + int seq_len = x_lod[0][bid + 1] - x_lod[0][bid]; + const T* prev_hidden_data = nullptr; + int tstart = 0; + if (h0_data) { + prev_hidden_data = h0_data + bid * D; + } else { + // W: {W_update, W_reset; W_state} + // update gate + act_gate(D, xx_data, xx_data); + // state gate + act_state(D, xx_data + D2, xx_data + D2); + // out = a*b + blas.VMUL(D, xx_data, xx_data + D2, hidden_out_data); + // save prev + prev_hidden_data = hidden_out_data; + tstart = 1; + move_step(); + } + for (int step = tstart; step < seq_len; ++step) { + // gemm prev * (Wu + Wr) + blas.GEMM(CblasNoTrans, CblasNoTrans, 1, D2, D, static_cast(1), + prev_hidden_data, D, wh_data, D2, static_cast(1), xx_data, + D3); + act_gate(D2, xx_data, xx_data); + // rt = rt*ht_1 inplace result + blas.VMUL(D, prev_hidden_data, xx_data + D, hidden_out_data); + + // gemm rt * Ws + blas.GEMM(CblasNoTrans, CblasNoTrans, 1, D, D, static_cast(1), + hidden_out_data, D, wh_state_data, D, static_cast(1), + xx_data + D2, D3); + act_state(D, xx_data + D2, xx_data + D2); + // out = zt*ht~ + (1-zt)*ht_1 + cross(D, xx_data, xx_data + D2, prev_hidden_data, hidden_out_data); + // save prev + prev_hidden_data = hidden_out_data; + move_step(); + } + } + } + + void BatchCompute(const framework::ExecutionContext& ctx) const { + using DeviceContext = paddle::platform::CPUDeviceContext; + auto* x = ctx.Input("X"); + if (x->lod()[0].size() == 2) { + SeqCompute(ctx); + return; + } + INIT_BASE_INPUT_OUTPUT + INIT_BASE_SIZES + INIT_VEC_FUNC + + auto* reordered_h0 = ctx.Output("ReorderedH0"); + auto* batched_input = ctx.Output("BatchedInput"); + auto* batched_out = ctx.Output("BatchedOut"); const T* x_data = x->data(); const T* wx_data = wx->data(); const T* wh_data = wh->data(); - auto x_dims = x->dims(); - auto wx_dims = wx->dims(); + T* xx_data = xx->mutable_data(ctx.GetPlace()); + T* batched_input_data = batched_input->mutable_data(ctx.GetPlace()); + T* batched_out_data = batched_out->mutable_data(ctx.GetPlace()); + hidden_out->mutable_data(ctx.GetPlace()); + auto& dev_ctx = ctx.template device_context(); auto blas = math::GetBlas(dev_ctx); math::LoDTensor2BatchFunctor to_batch; - 
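// Editor's note (assumption): the branch below picks the cheaper order for the
// two transforms. When the input width M exceeds 3D, the X * WeightX FC runs
// first, shrinking each row from M to 3D floats before the LoDTensor-to-batch
// copy; otherwise the rows are batched first and the FC runs on the batched
// layout. Either way BatchedInput ends up as the (T x 3D) batched gate input.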
-    if (x_dims[1] > wx_dims[1]) {
-      math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
-                                        x_data, wx_data, xx_data,
-                                        bias ? bias->data<T>() : NULL);
-      to_batch(dev_ctx, *xx, batched_gate, true, is_reverse);
+    if (M > D3) {
+      math::FCCompute<DeviceContext, T>(blas, total_T, D3, M, x_data, wx_data,
+                                        xx_data,
+                                        bias ? bias->data<T>() : nullptr);
+      to_batch(dev_ctx, *xx, batched_input, true, is_reverse);
     } else {
       to_batch(dev_ctx, *x, xx, true, is_reverse);
-      batched_gate->set_lod(xx->lod());
-      math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
-                                        xx_data, wx_data, batched_gate_data,
-                                        bias ? bias->data<T>() : NULL);
+      batched_input->set_lod(xx->lod());
+      math::FCCompute<DeviceContext, T>(blas, total_T, D3, M, xx_data, wx_data,
+                                        batched_input_data,
+                                        bias ? bias->data<T>() : nullptr);
     }
-    int frame_size = static_cast<int>(wx_dims[1] / 3);
-    math::GRUMetaValue<T> gru_value;
-    gru_value.gate_weight = const_cast<T*>(wh_data);
-    gru_value.state_weight =
-        const_cast<T*>(wh_data + 2 * frame_size * frame_size);
-    Tensor ordered_h0;
-
-    framework::Vector<size_t> order(batched_gate->lod()[2]);
+    auto batched_lod = batched_input->lod();
+    const auto& seq_order = batched_lod[2];
+    const int max_bs = seq_order.size();
+    reordered_h0->Resize({max_bs, D});
+
+    int tstart = 0;
+    T* prev_hidden_data = nullptr;
     if (h0) {
-      ReorderInitState<DeviceContext, T>(
-          ctx.template device_context<DeviceContext>(), *h0, order, &ordered_h0,
-          true);
-      gru_value.prev_out_value = ordered_h0.data<T>();
+      // reorder h0
+      T* reordered_h0_data = reordered_h0->mutable_data<T>(ctx.GetPlace());
+      const T* h0_data = h0->data<T>();
+      prev_hidden_data = reordered_h0_data;
+      size_t sz = sizeof(T) * D;
+      for (int i = 0; i < max_bs; ++i) {
+        std::memcpy(reordered_h0_data, h0_data + seq_order[i] * D, sz);
+        reordered_h0_data += D;
+      }
     } else {
-      gru_value.prev_out_value = nullptr;
+      // compute without h0
+      T* cur_in_data = batched_input_data;
+      T* cur_out_data = batched_out_data;
+      // W: {W_update, W_reset; W_state}
+      for (int i = 0; i < max_bs; ++i) {
+        // update gate
+        act_gate(D, cur_in_data, cur_in_data);
+        // state gate
+        act_state(D, cur_in_data + D2, cur_in_data + D2);
+        // out = a*b
+        blas.VMUL(D, cur_in_data, cur_in_data + D2, cur_out_data);
+        // add offset
+        cur_in_data += D3;
+        cur_out_data += D;
+      }
+      tstart = 1;
+      prev_hidden_data = batched_out_data;
     }
-    auto batch_starts = batched_gate->lod()[0];
-    size_t seq_len = batch_starts.size() - 1;
-    auto active_node =
-        math::detail::GetActivationType(ctx.Attr<std::string>("activation"));
-    auto active_gate = math::detail::GetActivationType(
-        ctx.Attr<std::string>("gate_activation"));
-
-#ifdef PADDLE_WITH_MKLML
-    // use MKL packed to speedup GEMM
-    if (FLAGS_paddle_num_threads >= 4) {
-      auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
-      T* packed_gate = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
-                                       frame_size * 2 /*width of weight*/,
-                                       frame_size /*height of height*/);
-      PADDLE_ENFORCE(packed_gate);
-      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size * 2,
-                     frame_size, T(1.0), gru_value.gate_weight, frame_size * 2,
-                     packed_gate);
-      T* packed_state = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
-                                        frame_size /*width of weight*/,
-                                        frame_size /*height of height*/);
-      PADDLE_ENFORCE(packed_state);
-      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size,
-                     frame_size, T(1.0), gru_value.state_weight, frame_size,
-                     packed_state);
-      for (size_t n = 0; n < seq_len; n++) {
-        int bstart = static_cast<int>(batch_starts[n]);
-        int bend = static_cast<int>(batch_starts[n + 1]);
-        int cur_batch_size = bend - bstart;
-
-        Tensor gate_t = batched_gate->Slice(bstart, bend);
-        Tensor reset_hidden_prev_t =
-            batch_reset_hidden_prev->Slice(bstart, bend);
-        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
-        gru_value.output_value = hidden_t.data<T>();
-        gru_value.gate_value = gate_t.data<T>();
-        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-
-        if (gru_value.prev_out_value) {
-          blas.GEMM_COMPUTE(
-              CblasNoTrans, CblasPacked, cur_batch_size, frame_size * 2,
-              frame_size, gru_value.prev_out_value, frame_size, packed_gate,
-              frame_size * 2, T(1), gru_value.gate_value, frame_size * 3);
-        }
-
-        math::detail::forward_reset_output(
-            math::detail::forward::gru_resetOutput<T>(), gru_value, frame_size,
-            cur_batch_size, active_gate);
-
-        if (gru_value.prev_out_value) {
-          blas.GEMM_COMPUTE(
-              CblasNoTrans, CblasPacked, cur_batch_size, frame_size, frame_size,
-              gru_value.reset_output_value, frame_size, packed_state,
-              frame_size, T(1), gru_value.gate_value + frame_size * 2,
-              frame_size * 3);
-        }
-
-        math::detail::forward_final_output(
-            math::detail::forward::gru_finalOutput<T>(), gru_value, frame_size,
-            cur_batch_size, active_node);
-
-        gru_value.prev_out_value = gru_value.output_value;
+    // Then start from next
+    const T* wh_state_data = wh_data + D * D2;
+    const auto& batch_starts = batched_lod[0];
+    const int max_seq_len = batch_starts.size() - 1;
+    batched_input_data = batched_input_data + tstart * max_bs * D3;
+    batched_out_data = batched_out_data + tstart * max_bs * D;
+    for (int step = tstart; step < max_seq_len; ++step) {
+      const int cur_bs = batch_starts[step + 1] - batch_starts[step];
+      // gemm prev * (Wu + Wr)
+      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D2, D, static_cast<T>(1),
+                prev_hidden_data, D, wh_data, D2, static_cast<T>(1),
+                batched_input_data, D3);
+
+      T* cur_batched_data = batched_input_data;
+      T* cur_out_data = batched_out_data;
+      T* cur_prev_hidden_data = prev_hidden_data;
+      for (int i = 0; i < cur_bs; ++i) {
+        act_gate(D2, cur_batched_data, cur_batched_data);
+        // rt = rt*ht_1 inplace result
+        blas.VMUL(D, cur_prev_hidden_data, cur_batched_data + D, cur_out_data);
+
+        cur_batched_data += D3;
+        cur_prev_hidden_data += D;
+        cur_out_data += D;
       }
-      blas.GEMM_FREE(packed_gate);
-      blas.GEMM_FREE(packed_state);
-    } else {
-#endif
-      for (size_t n = 0; n < seq_len; n++) {
-        int bstart = static_cast<int>(batch_starts[n]);
-        int bend = static_cast<int>(batch_starts[n + 1]);
-        int cur_batch_size = bend - bstart;
-
-        Tensor gate_t = batched_gate->Slice(bstart, bend);
-        Tensor reset_hidden_prev_t =
-            batch_reset_hidden_prev->Slice(bstart, bend);
-        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
-        gru_value.output_value = hidden_t.data<T>();
-        gru_value.gate_value = gate_t.data<T>();
-        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-
-        math::GRUUnitFunctor<DeviceContext, T>::compute(
-            dev_ctx, gru_value, frame_size, cur_batch_size, active_node,
-            active_gate);
-
-        gru_value.prev_out_value = gru_value.output_value;
+      cur_batched_data = batched_input_data;
+      cur_out_data = batched_out_data;
+      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D, D, static_cast<T>(1),
+                cur_out_data, D, wh_state_data, D, static_cast<T>(1),
+                cur_batched_data + D2, D3);
+
+      cur_prev_hidden_data = prev_hidden_data;
+      for (int i = 0; i < cur_bs; ++i) {
+        // ht~ = act_state(...)
+        act_state(D, cur_batched_data + D2, cur_batched_data + D2);
+        // out = zt*ht~ + (1-zt)*ht_1
+        cross(D, cur_batched_data, cur_batched_data + D2, cur_prev_hidden_data,
+              cur_out_data);
+
+        cur_batched_data += D3;
+        cur_prev_hidden_data += D;
+        cur_out_data += D;
       }
-#ifdef PADDLE_WITH_MKLML
+      prev_hidden_data = batched_out_data;
+      batched_out_data = cur_out_data;
+      batched_input_data = cur_batched_data;
     }
-#endif
+
     math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
-    batch_hidden->set_lod(batched_gate->lod());
-    to_seq(dev_ctx, *batch_hidden, hidden_out);
+    batched_out->set_lod(batched_lod);
+    to_seq(dev_ctx, *batched_out, hidden_out);
   }
+#undef INIT_VEC_FUNC
+#undef INIT_BASE_SIZES
+#undef INIT_BASE_INPUT_OUTPUT
 };
 
 }  // namespace operators
@@ -327,6 +428,5 @@ class FusionGRUKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker,
                   paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OP_CPU_KERNEL(
-    fusion_gru, ops::FusionGRUKernel<float>,
-    ops::FusionGRUKernel<double>);
+REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
+                       ops::FusionGRUKernel<double>);
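The fused kernel's inner loop is easiest to follow as plain scalar code. Below is a minimal standalone sketch of one GRU time step under the weight layout named in the kernel's comments ({W_update, W_reset; W_state}); the function name, toy values, and the sigmoid/tanh activation choice are illustrative assumptions, not the operator's exact API.

#include <cmath>
#include <cstdio>
#include <vector>

// One fused-GRU step for a single sequence element: xx holds the precomputed
// x*Wx + b laid out as [u | r | s] (each D wide), wh is the D x 2D
// update/reset weight, ws the D x D state weight, prev is h_{t-1}.
static void gru_step(int D, std::vector<float>* xx,
                     const std::vector<float>& prev,
                     const std::vector<float>& wh,
                     const std::vector<float>& ws, std::vector<float>* out) {
  auto sigmoid = [](float v) { return 1.f / (1.f + std::exp(-v)); };
  // gemm prev * (Wu + Wr), then act_gate over the first 2D entries
  for (int j = 0; j < 2 * D; ++j) {
    float acc = (*xx)[j];
    for (int i = 0; i < D; ++i) acc += prev[i] * wh[i * 2 * D + j];
    (*xx)[j] = sigmoid(acc);
  }
  // rt = rt*ht_1 (blas.VMUL), using out as scratch like hidden_out_data
  for (int i = 0; i < D; ++i) (*out)[i] = (*xx)[D + i] * prev[i];
  // gemm (rt*ht_1) * Ws, then act_state over the s block
  for (int j = 0; j < D; ++j) {
    float acc = (*xx)[2 * D + j];
    for (int i = 0; i < D; ++i) acc += (*out)[i] * ws[i * D + j];
    (*xx)[2 * D + j] = std::tanh(acc);
  }
  // out = zt*ht~ + (1-zt)*ht_1 (the cross step)
  for (int i = 0; i < D; ++i)
    (*out)[i] = (*xx)[i] * (*xx)[2 * D + i] + (1.f - (*xx)[i]) * prev[i];
}

int main() {
  const int D = 2;
  std::vector<float> xx = {0.1f, -0.2f, 0.3f, 0.0f, 0.5f, -0.1f};  // [u|r|s]
  std::vector<float> prev = {0.4f, -0.3f};
  std::vector<float> wh(D * 2 * D, 0.1f), ws(D * D, 0.2f), h(D);
  gru_step(D, &xx, prev, wh, ws, &h);
  std::printf("h = {%f, %f}\n", h[0], h[1]);
  return 0;
}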
diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc
index 00ba5ce8ee5e4084c8af204cfc37fe80c437f0d7..b3f7e0c0097b469998049a1db65d56a28cf02b5e 100644
--- a/paddle/fluid/operators/lod_tensor_to_array_op.cc
+++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
 #include "paddle/fluid/platform/device_context.h"
+#include "paddle/fluid/platform/port.h"
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt
index 1b75df5d7d97e54dfdc461660e53a368311e3778..d7f0f3c6280db7d121bf8821ec6d578e22a33da6 100644
--- a/paddle/fluid/operators/math/CMakeLists.txt
+++ b/paddle/fluid/operators/math/CMakeLists.txt
@@ -1,4 +1,6 @@
+if (NOT WIN32)
 add_subdirectory(detail)
+endif(NOT WIN32)
 
 function(math_library TARGET)
     # math_library is a function to create math library.
@@ -38,9 +40,13 @@ math_library(context_project DEPS im2col math_function)
 math_library(cross_entropy)
 math_library(cos_sim_functor)
 math_library(depthwise_conv)
-math_library(gru_compute DEPS activation_functions math_function)
 math_library(im2col)
+
+if (NOT WIN32) # windows do not support avx functions yet.
+math_library(gru_compute DEPS activation_functions math_function)
 math_library(lstm_compute DEPS activation_functions)
+endif (NOT WIN32)
+
 cc_library(blas SRCS blas.cc DEPS cblas framework_proto device_context)
 math_library(math_function DEPS blas)
 math_library(maxouting)
@@ -51,7 +57,9 @@ math_library(sequence_padding)
 math_library(sequence_pooling DEPS math_function)
 math_library(sequence_scale)
 math_library(softmax DEPS math_function)
+if (NOT WIN32)
 math_library(matrix_bit_code)
+endif (NOT WIN32)
 math_library(unpooling)
 math_library(vol2col)
diff --git a/paddle/fluid/operators/math/cpu_vec.h b/paddle/fluid/operators/math/cpu_vec.h
index 5693761e9ffd96b40040223b5498b63b0274bf0f..9560e3a3c15ca63892fbe3552679a22f027f11e2 100644
--- a/paddle/fluid/operators/math/cpu_vec.h
+++ b/paddle/fluid/operators/math/cpu_vec.h
@@ -132,6 +132,121 @@ inline void vec_scal<float, platform::jit::avx512_common>(const int n,
   vec_scal<float, platform::jit::avx2>(n, a, x, y);
 }
 
+template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
+inline void vec_bias_sub(const int n, const T a, const T* x, T* y) {
+  for (int i = 0; i < n; ++i) {
+    y[i] = a - x[i];
+  }
+}
+
+template <>
+inline void vec_bias_sub<float, platform::jit::avx>(const int n, const float a,
+                                                    const float* x, float* y) {
+#ifdef __AVX__
+  constexpr int block = AVX_FLOAT_BLOCK;
+  if (n < block) {
+    vec_bias_sub<float, platform::jit::isa_any>(n, a, x, y);
+    return;
+  }
+  const int rest = n % block;
+  const int end = n - rest;
+  int i = 0;
+  __m256 bias = _mm256_set1_ps(a);
+  __m256 tmp;
+#define MOVE_ONE_STEP             \
+  tmp = _mm256_loadu_ps(x + i);   \
+  tmp = _mm256_sub_ps(bias, tmp); \
+  _mm256_storeu_ps(y + i, tmp)
+  for (i = 0; i < end; i += block) {
+    MOVE_ONE_STEP;
+  }
+#undef MOVE_ONE_STEP
+  if (rest == 0) {
+    return;
+  }
+  // can not continue move step if src and dst are inplace
+  for (i = n - rest; i < n; ++i) {
+    y[i] = a - x[i];
+  }
+#else
+  vec_bias_sub<float, platform::jit::isa_any>(n, a, x, y);
+#endif
+}
+
+template <>
+inline void vec_bias_sub<float, platform::jit::avx2>(const int n, const float a,
                                                      const float* x, float* y) {
+  vec_bias_sub<float, platform::jit::avx>(n, a, x, y);
+}
+
+template <>
+inline void vec_bias_sub<float, platform::jit::avx512_common>(const int n,
+                                                              const float a,
+                                                              const float* x,
+                                                              float* y) {
+  // TODO(TJ): enable me
+  vec_bias_sub<float, platform::jit::avx2>(n, a, x, y);
+}
+
+// out = x*y + (1-x)*z
+template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
+inline void vec_cross(const int n, const T* x, const T* y, const T* z, T* out) {
+  for (int i = 0; i < n; ++i) {
+    out[i] = x[i] * y[i] + (static_cast<T>(1) - x[i]) * z[i];
+  }
+}
+
+template <>
+inline void vec_cross<float, platform::jit::avx>(const int n, const float* x,
+                                                 const float* y, const float* z,
+                                                 float* out) {
+#ifdef __AVX__
+  constexpr int block = AVX_FLOAT_BLOCK;
+  if (n < block) {
+    vec_cross<float, platform::jit::isa_any>(n, x, y, z, out);
+    return;
+  }
+  const int rest = n % block;
+  const int end = n - rest;
+  int i = 0;
+  __m256 bias = _mm256_set1_ps(1.f);
+  __m256 tmpx, tmpy, tmpz;
+  for (i = 0; i < end; i += block) {
+    tmpx = _mm256_loadu_ps(x + i);
+    tmpy = _mm256_loadu_ps(y + i);
+    tmpz = _mm256_loadu_ps(z + i);
+    tmpy = _mm256_mul_ps(tmpx, tmpy);
+    tmpx = _mm256_sub_ps(bias, tmpx);
+    tmpz = _mm256_mul_ps(tmpx, tmpz);
+    tmpz = _mm256_add_ps(tmpy, tmpz);
+    _mm256_storeu_ps(out + i, tmpz);
+  }
+  if (rest == 0) {
+    return;
+  }
+  // can not continue move step if src and dst are inplace
+  for (i = n - rest; i < n; ++i) {
+    out[i] = x[i] * y[i] + (1.f - x[i]) * z[i];
+  }
+#else
+  vec_cross<float, platform::jit::isa_any>(n, x, y, z, out);
+#endif
+}
+
+template <>
+inline void vec_cross<float, platform::jit::avx2>(const int n, const float* x,
+                                                  const float* y,
+                                                  const float* z, float* out) {
+  vec_cross<float, platform::jit::avx>(n, x, y, z, out);
+}
+
+template <>
+inline void vec_cross<float, platform::jit::avx512_common>(
+    const int n, const float* x, const float* y, const float* z, float* out) {
+  // TODO(TJ): enable me
+  vec_cross<float, platform::jit::avx>(n, x, y, z, out);
+}
+
 template <typename T, platform::jit::cpu_isa_t isa = platform::jit::isa_any>
 inline void vec_add_bias(const int n, const T a, const T* x, T* y) {
   for (int i = 0; i < n; ++i) {
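Since the AVX specialization above falls back to the scalar loop for short and leftover runs, a quick standalone check (an assumed test harness, not part of this patch) can confirm that the vector body plus scalar tail reproduces out = x*y + (1-x)*z exactly:

#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>
#ifdef __AVX__
#include <immintrin.h>
#endif

int main() {
  const int n = 19;  // deliberately not a multiple of the 8-float AVX block
  std::vector<float> x(n), y(n), z(n), ref(n), out(n);
  for (int i = 0; i < n; ++i) {
    x[i] = 0.1f * i;
    y[i] = 1.f - 0.05f * i;
    z[i] = 0.5f;
    ref[i] = x[i] * y[i] + (1.f - x[i]) * z[i];  // scalar definition
  }
#ifdef __AVX__
  int i = 0;
  __m256 one = _mm256_set1_ps(1.f);
  for (; i + 8 <= n; i += 8) {  // vector body, like MOVE_ONE_STEP above
    __m256 tx = _mm256_loadu_ps(&x[i]);
    __m256 ty = _mm256_loadu_ps(&y[i]);
    __m256 tz = _mm256_loadu_ps(&z[i]);
    ty = _mm256_mul_ps(tx, ty);
    tx = _mm256_sub_ps(one, tx);
    tz = _mm256_mul_ps(tx, tz);
    _mm256_storeu_ps(&out[i], _mm256_add_ps(ty, tz));
  }
  for (; i < n; ++i) out[i] = x[i] * y[i] + (1.f - x[i]) * z[i];  // tail
#else
  out = ref;  // no AVX at compile time: scalar path is the only path
#endif
  for (int k = 0; k < n; ++k) assert(std::fabs(out[k] - ref[k]) < 1e-6f);
  std::puts("vec_cross scalar/AVX paths agree");
  return 0;
}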
diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc
index 9a6e646b28fdec78734eb4e7b98c8acf688b2645..5923792902a81521256de300f77955f1ea3d16c6 100644
--- a/paddle/fluid/operators/math/math_function.cc
+++ b/paddle/fluid/operators/math/math_function.cc
@@ -55,7 +55,7 @@ struct TensorSetConstantCPU {
   TensorSetConstantCPU(framework::Tensor* tensor, float value)
       : tensor_(tensor), value_(value) {}
   template <typename T>
-  void operator()() const {
+  void apply() const {
     auto cpu = platform::CPUPlace();
     auto* begin = tensor_->mutable_data<T>(cpu);
     std::fill(begin, begin + tensor_->numel(), static_cast<T>(value_));
diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu
index 12d1baa8fb544a8b9684e43204c61ba410d1b295..79b7538ad05b0ff348b8264d50b63211b5254e80 100644
--- a/paddle/fluid/operators/math/math_function.cu
+++ b/paddle/fluid/operators/math/math_function.cu
@@ -52,7 +52,7 @@ struct TensorSetConstantGPU {
       : context_(context), tensor_(tensor), value_(value) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     SetConstant<platform::CUDADeviceContext, T> functor;
     functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
             tensor_, static_cast<T>(value_));
diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h
index 07372235a7c23832e528c3e852a4747f4244b833..a3186f82d0c0cc6c9585735ddf7e9bb4db7126cb 100644
--- a/paddle/fluid/operators/math/sequence2batch.h
+++ b/paddle/fluid/operators/math/sequence2batch.h
@@ -92,7 +92,7 @@ class LoDTensor2BatchFunctor {
     // Calculate the start position of each batch.
     // example:  sequences = {s0, s1, s2}
     //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
-    //           num_batch = 5,
+    //           max_seqlen = 5,
    //           batchIndex = {b0, b1, b2, b3, b4}
     //           b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
     //           batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
@@ -109,7 +109,7 @@ class LoDTensor2BatchFunctor {
     //    where 1 is the second sequence,
     //          0 is the first sequence,
     //          2 is the third sequence.
-    // The num_batch represents batch size after rearranging the
+    // The max_seqlen represents batch size after rearranging the
     // input LodTensor. It is also the maximum length of input sequence.
 
     paddle::framework::LoD batch_lods;
@@ -118,8 +118,8 @@ class LoDTensor2BatchFunctor {
     batch_lods.emplace_back(std::vector<size_t>{0});
 
     // batch_lods[0] is the start positions for batch LoDTensor
-    int num_batch = seq_info[0].length;
-    batch_lods[0].resize(static_cast<size_t>(num_batch + 1));
+    int max_seqlen = seq_info[0].length;
+    batch_lods[0].resize(static_cast<size_t>(max_seqlen + 1));
     // batch_lods[1] is the raw index in the input LoDTensor
     batch_lods[1].resize(static_cast<size_t>(lod_tensor.dims()[0]));
     // batch_lods[2] is the sort order for the input LoDTensor.
@@ -128,7 +128,7 @@ class LoDTensor2BatchFunctor {
     size_t* batch_starts = batch_lods[0].data();
     size_t* seq2batch_idx = batch_lods[1].data();
     batch_starts[0] = 0;
-    for (int n = 0; n < num_batch; n++) {
+    for (int n = 0; n < max_seqlen; n++) {
       auto batch_id = static_cast<int>(batch_starts[n]);
       for (size_t i = 0; i < seq_info.size(); ++i) {
         int seq_len = seq_info[i].length;
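The renamed max_seqlen is the length of the longest sequence, and the batch start positions in the comment fall out of the descending lengths directly: at each time step, the batch holds every sequence still alive. A small illustrative sketch (not the functor's real interface) reproduces batch_start_positions[6] = {0, 3, 6, 9, 11, 12} from the example's sorted lengths {5, 4, 3}:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> lengths = {5, 4, 3};  // already sorted, longest first
  int max_seqlen = lengths[0];
  std::vector<int> batch_starts = {0};
  for (int step = 0; step < max_seqlen; ++step) {
    int alive = 0;  // sequences whose length exceeds this time step
    for (int len : lengths) alive += (len > step) ? 1 : 0;
    batch_starts.push_back(batch_starts.back() + alive);
  }
  // Prints: 0 3 6 9 11 12, matching the comment in the header above.
  for (int s : batch_starts) std::printf("%d ", s);
  std::printf("\n");
  return 0;
}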
diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu
index 625065692c1f32c89d9e566d00051e237ac9a3af..59d8b9b8a8d554eb16826712ff634eed5df2d648 100644
--- a/paddle/fluid/operators/one_hot_op.cu
+++ b/paddle/fluid/operators/one_hot_op.cu
@@ -41,7 +41,7 @@ struct OneHotOpCUDAFunctor {
       : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
 
   template <typename OutT>
-  void operator()() const {
+  void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
     auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h
index 7e77f25089c4bd0297b0eb5a0ed7555cc0af5a9f..1ebd2676496940ff8f90caaaded5c8227bd7ae78 100644
--- a/paddle/fluid/operators/one_hot_op.h
+++ b/paddle/fluid/operators/one_hot_op.h
@@ -31,7 +31,7 @@ struct OneHotOpFunctor {
       : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
 
   template <typename OutT>
-  void operator()() const {
+  void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
     auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
diff --git a/paddle/fluid/operators/prelu_op.h b/paddle/fluid/operators/prelu_op.h
index f9076cbc678534fd5490fa0d7adeac0e50909a39..12f1525594ecf0887618616ffe563bd2bda32496 100644
--- a/paddle/fluid/operators/prelu_op.h
+++ b/paddle/fluid/operators/prelu_op.h
@@ -38,10 +38,9 @@ class PReluKernel : public framework::OpKernel<T> {
     auto dim = x->dims();
     int index = 0;
     int i = 0;
-    int temp = 0;
     if (mode == "channel") {
+      int temp = numel / (dim[0] * dim[1]);
       for (i = 0; i < numel; i++) {
-        temp = numel / (dim[0] * dim[1]);
         index = (i / temp) % dim[1];
         o_ptr[i] = x_ptr[i] > 0 ? x_ptr[i] : alpha_ptr[index] * x_ptr[i];
       }
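The PReLU change hoists the loop-invariant temp out of the element loop; temp is the per-channel plane size (H*W for an NCHW tensor), so (i / temp) % C selects the channel whose alpha applies. A self-contained sketch with assumed toy shapes:

#include <cstdio>
#include <vector>

int main() {
  const int N = 2, C = 3, H = 2, W = 2;
  const int numel = N * C * H * W;
  std::vector<float> x(numel, -1.f), alpha = {0.1f, 0.2f, 0.3f}, o(numel);
  const int temp = numel / (N * C);  // hoisted out of the loop, as in the diff
  for (int i = 0; i < numel; ++i) {
    int index = (i / temp) % C;  // flat index -> channel
    o[i] = x[i] > 0 ? x[i] : alpha[index] * x[i];
  }
  // Elements 0..3 sit in channel 0 (alpha 0.1), 4..7 in channel 1 (alpha 0.2).
  std::printf("o[0]=%.2f o[%d]=%.2f\n", o[0], temp, o[temp]);
  return 0;
}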
diff --git a/paddle/fluid/operators/sampling_id_op.h b/paddle/fluid/operators/sampling_id_op.h
index 01308e416a9313bad13ded4e40c79bb0550e31ed..133d3f72dbd6ab13c98d124369038309c94cba5b 100644
--- a/paddle/fluid/operators/sampling_id_op.h
+++ b/paddle/fluid/operators/sampling_id_op.h
@@ -53,7 +53,7 @@ class SamplingIdKernel : public framework::OpKernel<T> {
         static_cast<T>(context.Attr<float>("min")),
         static_cast<T>(context.Attr<float>("max")));
 
-    std::vector<T> ids(batch_size);
+    std::vector<int64_t> ids(batch_size);
     for (int i = 0; i < batch_size; ++i) {
       T r = dist(engine);
       int idx = width - 1;
@@ -63,7 +63,7 @@
           break;
         }
       }
-      ids[i] = ins_vector[idx];
+      ids[i] = int64_t(idx);
     }
 
     std::vector<int64_t> out_dim;
diff --git a/paddle/fluid/operators/sequence_mask_op.h b/paddle/fluid/operators/sequence_mask_op.h
index 0dd554adfe57e469c2fac17f27adae2db7003a6a..18acb735cecabd1e01f7821c880fd8ed5e52971f 100644
--- a/paddle/fluid/operators/sequence_mask_op.h
+++ b/paddle/fluid/operators/sequence_mask_op.h
@@ -99,7 +99,7 @@ struct SequenceMaskFunctor {
       : ctx_(ctx), x_(x), y_(y), limits_(limits), maxlen_(maxlen) {}
 
   template <typename Ty>
-  void operator()() const {
+  void apply() const {
     auto *y_data = y_->mutable_data<Ty>(ctx_.GetPlace());
     platform::ForRange<DeviceContext> for_range(ctx_, limits_);
     for_range(SequenceMaskForRangeFunctor<Tx, Ty>(x_, y_data, maxlen_));
diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc
index 8146c5f56104b7dec86b1c4491ed10fc2e94b58b..29d2fb989754f5621222768a279a1c898ea1c355 100644
--- a/paddle/fluid/operators/shrink_rnn_memory_op.cc
+++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc
@@ -62,7 +62,10 @@ class ShrinkRNNMemoryOp : public ArrayOp {
     }
 
     if (dst_num_rows != 0) {
-      out_tensor.ShareDataWith(x_tensor.Slice(0, height));
+      out_tensor.mutable_data(place, x_tensor.type());
+      auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
+      framework::TensorCopy(x_tensor.Slice(0, height), place, *dev_ctx,
+                            &out_tensor);
     }
   }
 };
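One hazard the ShrinkRNNMemoryOp change guards against: ShareDataWith aliases the source buffer, while TensorCopy gives the output its own storage. The stand-in sketch below (plain vectors rather than Paddle tensors, so only an analogy) shows why an alias can dangle once the source reallocates, whereas the deep copy stays valid.

#include <cstdio>
#include <vector>

int main() {
  std::vector<float> x = {1, 2, 3, 4};
  float* alias = x.data();  // "ShareDataWith": borrow x's buffer
  std::vector<float> copy(x.begin(), x.begin() + 2);  // "TensorCopy": own it
  x.resize(1024, 0.f);  // x reallocates; alias now points at freed memory
  (void)alias;  // dereferencing alias here would be undefined behaviour
  std::printf("copy[0]=%.0f copy[1]=%.0f\n", copy[0], copy[1]);  // still valid
  return 0;
}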
diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc
index 2cc26da013f59f5b7ee1747d57baca9c1c0efe2c..3ec20ad7e5b4d8104e7228da9f8e7f5409417301 100644
--- a/paddle/fluid/platform/device_context.cc
+++ b/paddle/fluid/platform/device_context.cc
@@ -16,6 +16,9 @@ limitations under the License. */
 #include <vector>
 
 #include "paddle/fluid/memory/memory.h"
+#ifdef PADDLE_WITH_CUDA
+#include "paddle/fluid/framework/rw_lock.h"
+#endif
 
 namespace paddle {
 namespace platform {
@@ -142,7 +145,59 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface {
   mutable unsigned int* semaphore_;
 };
 
-CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) {
+class CudnnHolder {
+ public:
+  CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place)
+      : workspace_(nullptr), workspace_len_(0), stream_(stream), place_(place) {
+    PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
+    PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, *stream_));
+  }
+
+  cudnnHandle_t cudnn_handle() const { return cudnn_handle_; }
+
+  void RunFunc(const std::function<void(void*)>& cudnn_func,
+               size_t required_workspace_len) {
+    std::lock_guard<std::mutex> lock(mtx_);
+    if (required_workspace_len > workspace_len_) {
+      ReallocateWorkspace(required_workspace_len);
+    }
+    cudnn_func(workspace_);
+  }
+
+  ~CudnnHolder() {
+    PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
+    if (workspace_ != nullptr) {
+      paddle::memory::Free(place_, workspace_);
+    }
+  }
+
+ private:
+  void ReallocateWorkspace(size_t required_workspace_len) {
+    if (required_workspace_len <= workspace_len_) {
+      return;
+    }
+    void* new_workspace = paddle::memory::Alloc(place_, required_workspace_len);
+    if (workspace_ != nullptr) {
+      // Maybe someone is using the current workspace
+      PADDLE_ENFORCE(cudaStreamSynchronize(*stream_));
+      paddle::memory::Free(place_, workspace_);
+    }
+    workspace_ = new_workspace;
+    workspace_len_ = required_workspace_len;
+  }
+
+  cudnnHandle_t cudnn_handle_;
+  void* workspace_;
+  size_t workspace_len_;
+
+  const cudaStream_t* stream_;  // not owned;
+  const CUDAPlace place_;
+
+  std::mutex mtx_;
+};
+
+CUDADeviceContext::CUDADeviceContext(CUDAPlace place)
+    : place_(place), cudnn_holder_(nullptr) {
   SetDeviceId(place_.device);
   compute_capability = GetCUDAComputeCapability(place_.device);
   multi_process = GetCUDAMultiProcessors(place_.device);
@@ -154,10 +209,7 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) {
   PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
   PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
   if (dynload::HasCUDNN()) {
-    PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
-    PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
-  } else {
-    cudnn_handle_ = nullptr;
+    cudnn_holder_.reset(new CudnnHolder(&stream_, place));
   }
 }
 
@@ -165,9 +217,6 @@ CUDADeviceContext::~CUDADeviceContext() {
   SetDeviceId(place_.device);
   Wait();
   PADDLE_ENFORCE(dynload::cublasDestroy(cublas_handle_));
-  if (cudnn_handle_ != nullptr) {
-    PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
-  }
   eigen_stream_.reset();
   eigen_device_.reset();
   PADDLE_ENFORCE(cudaStreamDestroy(stream_));
@@ -196,7 +245,14 @@ cublasHandle_t CUDADeviceContext::cublas_handle() const {
   return cublas_handle_;
 }
 
-cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; }
+cudnnHandle_t CUDADeviceContext::cudnn_handle() const {
+  return cudnn_holder_->cudnn_handle();
+}
+
+void CUDADeviceContext::RunCudnnFuncWithWorkspace(
+    const std::function<void(void*)>& cudnn_func, size_t workspace_len) const {
+  cudnn_holder_->RunFunc(cudnn_func, workspace_len);
+}
 
 cudaStream_t CUDADeviceContext::stream() const { return stream_; }
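CudnnHolder centralizes one lazily grown, mutex-guarded cudnn workspace per device context and hands it to callers through a callback, so the buffer can never be used outside the lock. A simplified sketch of the pattern (malloc/free stand in for paddle::memory::Alloc/Free, and the stream synchronization before freeing the old buffer is reduced to a comment):

#include <cstdlib>
#include <functional>
#include <mutex>

class WorkspaceHolder {
 public:
  ~WorkspaceHolder() { std::free(ws_); }

  // Run func with at least `len` bytes of scratch space, under the lock.
  void RunFunc(const std::function<void(void*)>& func, size_t len) {
    std::lock_guard<std::mutex> lock(mtx_);
    if (len > len_) {
      // The real holder synchronizes the stream first, since an in-flight
      // kernel may still be reading the old buffer.
      std::free(ws_);
      ws_ = std::malloc(len);
      len_ = len;  // the workspace only ever grows
    }
    func(ws_);
  }

 private:
  void* ws_ = nullptr;
  size_t len_ = 0;
  std::mutex mtx_;
};

int main() {
  WorkspaceHolder holder;
  holder.RunFunc([](void* ws) { /* pretend cudnn call using ws */ }, 1 << 20);
  holder.RunFunc([](void* ws) { /* reuses the same 1 MiB buffer */ }, 1 << 10);
  return 0;
}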
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index b97dad20db0b003b4886b7c7cfd1c8de8bf44ab9..3ed49fc4233d4c0cd6cc16319eda08480ab9b434 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -69,6 +69,7 @@ struct DefaultDeviceContextType<platform::CPUPlace> {
 #ifdef PADDLE_WITH_CUDA
 
 class EigenCudaStreamDevice;
+class CudnnHolder;
 
 class CUDADeviceContext : public DeviceContext {
  public:
@@ -96,6 +97,11 @@ class CUDADeviceContext : public DeviceContext {
   /*! \brief  Return cudnn handle in the device context. */
   cudnnHandle_t cudnn_handle() const;
 
+  /*! \brief  Run a cudnn function with the workspace provided by
+   * CUDADeviceContext */
+  void RunCudnnFuncWithWorkspace(const std::function<void(void*)>& cudnn_func,
+                                 size_t workspace_len) const;
+
   /*! \brief  Return cuda stream in the device context. */
   cudaStream_t stream() const;
 
@@ -111,8 +117,8 @@ class CUDADeviceContext : public DeviceContext {
   std::unique_ptr<Eigen::GpuDevice> eigen_device_;
   std::unique_ptr<EigenCudaStreamDevice> eigen_stream_;
 
+  std::unique_ptr<CudnnHolder> cudnn_holder_;
   cudaStream_t stream_;
-  cudnnHandle_t cudnn_handle_;
   cublasHandle_t cublas_handle_;
 
   int compute_capability;
diff --git a/python/paddle/fluid/inferencer.py b/python/paddle/fluid/inferencer.py
index 3d2ef566173f81b29a6d8ea79cff00991a4ef3c4..a9b94a20720615dbfca97749463f27dbc88ac64f 100644
--- a/python/paddle/fluid/inferencer.py
+++ b/python/paddle/fluid/inferencer.py
@@ -98,10 +98,9 @@ class Inferencer(object):
             raise ValueError(
                 "inputs should be a map of {'input_name': input_var}")
 
-        with executor.scope_guard(self.scope):
-            results = self.exe.run(self.inference_program,
-                                   feed=inputs,
-                                   fetch_list=[self.predict_var],
+        with self._prog_and_scope_guard():
+            results = self.exe.run(feed=inputs,
+                                   fetch_list=[self.predict_var.name],
                                    return_numpy=return_numpy)
 
         return results
diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py
index be494a0d340c62fb35afbf97fba38eff08a965e6..2e15c224f662171bf0fee228bdd9d36189fbe499 100644
--- a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py
+++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py
@@ -16,7 +16,9 @@ from __future__ import print_function
 
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import numpy
+import os
 
 import cifar10_small_test_set
 
@@ -89,7 +91,7 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     BATCH_SIZE = 128
     EPOCH_NUM = 1
 
@@ -116,7 +118,10 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
-        train_func=train_program, optimizer_func=optimizer_func, place=place)
+        train_func=train_program,
+        optimizer_func=optimizer_func,
+        place=place,
+        parallel=parallel)
 
     trainer.train(
         reader=train_reader,
@@ -125,10 +130,13 @@ def train(use_cuda, train_program, params_dirname):
         feed_order=['pixel', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     # The input's dimension of conv should be 4-D or 5-D.
     # Use normilized image pixels as input data, which should be in the range
@@ -139,22 +147,34 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results)
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
 
     save_path = "image_classification_resnet.inference.model"
 
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
+
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
     for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
index dbc7bc06c93157f271c79e85b6925468e861e57f..2f205de1c011cd714439d4896adc8862ce68d99b 100644
--- a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
+++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
@@ -16,7 +16,9 @@ from __future__ import print_function
 
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import numpy
+import os
 
 import cifar10_small_test_set
 
@@ -68,7 +70,7 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     BATCH_SIZE = 128
     train_reader = paddle.batch(
         paddle.reader.shuffle(
@@ -93,7 +95,10 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
-        train_func=train_program, place=place, optimizer_func=optimizer_func)
+        train_func=train_program,
+        place=place,
+        optimizer_func=optimizer_func,
+        parallel=parallel)
 
     trainer.train(
         reader=train_reader,
@@ -102,10 +107,13 @@ def train(use_cuda, train_program, params_dirname):
         feed_order=['pixel', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     # The input's dimension of conv should be 4-D or 5-D.
     # Use normilized image pixels as input data, which should be in the range
@@ -116,22 +124,31 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results)
 
 
-def main(use_cuda):
-    if use_cuda and not fluid.core.is_compiled_with_cuda():
-        return
+def main(use_cuda, parallel):
     save_path = "image_classification_vgg.inference.model"
 
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
 
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
     for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py
index 187bef1b0c1a614fbca88ef22097831d7bd5cd7f..a5adf68158526b628deba3fc7ca6856eb7c9cded 100644
--- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py
+++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py
@@ -64,14 +64,14 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
         train_func=train_program,
         place=place,
         optimizer_func=optimizer_func,
-        parallel=True)
+        parallel=parallel)
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
@@ -108,11 +108,14 @@ def train(use_cuda, train_program, params_dirname):
         feed_order=['img', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     batch_size = 1
     tensor_img = numpy.random.uniform(-1.0, 1.0,
@@ -123,20 +126,32 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results[0])
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     params_dirname = "recognize_digits_conv.inference.model"
 
     # call train() with is_local argument to run distributed train
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
+
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
 
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
-    # for use_cuda in (False, True):
-    main(use_cuda=core.is_compiled_with_cuda())
+    for use_cuda in (False, True):
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
index b95e7db122adbb1414da1691926c920b963fd6fe..e7d8b23b3253d368210c08be4e53c06ba0c5d618 100644
--- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
+++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import argparse
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import paddle
 import sys
 import numpy
@@ -50,11 +51,14 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, params_dirname, parallel):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
-        train_func=train_program, place=place, optimizer_func=optimizer_func)
+        train_func=train_program,
+        place=place,
+        optimizer_func=optimizer_func,
+        parallel=parallel)
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
@@ -86,11 +90,14 @@ def train(use_cuda, train_program, params_dirname):
         feed_order=['img', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     batch_size = 1
     tensor_img = numpy.random.uniform(-1.0, 1.0,
@@ -101,20 +108,32 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results[0])
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     params_dirname = "recognize_digits_mlp.inference.model"
 
     # call train() with is_local argument to run distributed train
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
+
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
 
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
-    # for use_cuda in (False, True):
-    main(use_cuda=False)
+    for use_cuda in (False, True):
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
index 1d9ab44ed447468fb8383c52747d14970ae27ced..b85501ef6b80d1f5004aa0dd08c3123d3bda48a5 100644
--- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
+++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
@@ -438,7 +438,7 @@ class TestLocalLookupTable(TestDistLookupTableBase):
         # 2 optimize for table adam
         # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
         self.assertEqual([op.type for op in pserver1.blocks[2].ops],
-                         ["sum", "adam", "scale", "scale"])
+                         ["sum", "scale", "adam", "scale", "scale"])
 
         trainer, _ = self.get_trainer()
         self.assertEqual(len(trainer.blocks), 1)
diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
index cc0494774a5f2f24faaae65f193fc3ff9270d9ac..820ad4af88e9dc49cbe57ac182e1ba0402725f3d 100644
--- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
@@ -21,28 +21,41 @@ from op_test import OpTest
 
 class TestFakeQuantizeOp(OpTest):
     def setUp(self):
-        self.op_type = "fake_quantize"
+        self.op_type = "fake_quantize_abs_max"
+        self.attrs = {'bit_length': 8}
+        self.inputs = {'X': np.random.random((124, 240)).astype("float32"), }
+        scale = np.max(np.abs(self.inputs['X'])).astype("float32")
+        self.outputs = {
+            'Out': np.round(self.inputs['X'] / scale * (
+                (1 << (self.attrs['bit_length'] - 1)) - 1)),
+            'OutScale': np.array(scale).astype("float32"),
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestFakeQuantizeRangeAbsMaxOp(OpTest):
+    def setUp(self):
+        self.op_type = "fake_quantize_range_abs_max"
         self.attrs = {
-            'bit_length': 8,
-            'quantize_type': 'abs_max',
-            'window_size': 10000
+            'bit_length': int(5),
+            'window_size': int(1),
+            'is_test': False
         }
         self.inputs = {
-            'X': np.random.random((10, 10)).astype("float32"),
-            'InScales': np.zeros(self.attrs['window_size']).astype("float32"),
-            'InCurrentIter': np.zeros(1).astype("float32"),
-            'InMovingScale': np.zeros(1).astype("float32")
-        }
-        self.scale = {
-            'abs_max': np.max(np.abs(self.inputs['X'])).astype("float32")
+            'X': np.random.random((8, 16, 7, 7)).astype("float32"),
+            'Iter': np.zeros(1).astype("int64"),
+            'InScale': np.zeros(1).astype("float32")
         }
+        scale = np.max(np.abs(self.inputs['X'])).astype("float32")
+        out_scales = np.zeros(self.attrs['window_size']).astype("float32")
+        out_scales[0] = scale
         self.outputs = {
-            'Out': np.round(self.inputs['X'] / self.scale['abs_max'] * (
+            'Out': np.round(self.inputs['X'] / scale * (
                 (1 << (self.attrs['bit_length'] - 1)) - 1)),
-            'OutScales': np.zeros(self.attrs['window_size']).astype("float32"),
-            'OutMovingScale':
-            np.array([self.scale['abs_max']]).astype("float32"),
-            'OutCurrentIter': np.zeros(1).astype("float32")
+            'OutScale': scale,
+            'OutScales': out_scales,
         }
 
     def test_check_output(self):
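The abs_max fake-quantize expectation in the rewritten test is scale = max|x| and out = round(x / scale * ((1 << (bit_length - 1)) - 1)), i.e. values are mapped onto the signed integer grid whose extreme is the tensor's largest magnitude. The same arithmetic in a standalone sketch (the sample values are made up):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int bits = 8;
  std::vector<float> x = {-0.5f, 0.25f, 1.0f, -2.0f};
  float scale = 0.f;
  for (float v : x) scale = std::max(scale, std::fabs(v));  // abs_max -> 2.0
  const float levels = static_cast<float>((1 << (bits - 1)) - 1);  // 127
  for (float v : x)
    std::printf("%g -> %g\n", v, std::round(v / scale * levels));
  // -0.5 -> -32, 0.25 -> 16, 1 -> 64, -2 -> -127 (the max hits the grid edge)
  return 0;
}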
diff --git a/python/paddle/fluid/tests/unittests/test_sampling_id_op.py b/python/paddle/fluid/tests/unittests/test_sampling_id_op.py
index 708265b4576809b1f4157d54989c6138c6e5a2b0..674ef2ddf44edb4246c9d952cb75b36fe3d6ddc8 100644
--- a/python/paddle/fluid/tests/unittests/test_sampling_id_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sampling_id_op.py
@@ -25,9 +25,9 @@ class TestSamplingIdOp(OpTest):
         self.op_type = "sampling_id"
         self.use_mkldnn = False
         self.init_kernel_type()
-        self.X = np.random.random((8, 4)).astype('float32')
+        self.X = np.random.random((100, 10)).astype('float32')
         self.inputs = {"X": self.X}
-        self.Y = np.random.random(8).astype('float32')
+        self.Y = np.random.random(100).astype('int64')
         self.outputs = {'Out': self.Y}
         self.attrs = {'max': 1.0, 'min': 0.0, 'seed': 1}
 
@@ -36,6 +36,16 @@ class TestSamplingIdOp(OpTest):
         y1 = self.out
         self.check_output_customized(self.verify_output)
         y2 = self.out
+
+        # check dtype
+        assert y1.dtype == np.int64
+        assert y2.dtype == np.int64
+
+        # check output is index ids of inputs
+        inputs_ids = np.arange(self.X.shape[1])
+        assert np.isin(y1, inputs_ids).all()
+        assert np.isin(y2, inputs_ids).all()
+
         self.assertTrue(np.array_equal(y1, y2))
         self.assertEqual(len(y1), len(self.Y))
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index a6266a7b0c9ac40eac7b2823fc7ddf38f55357a9..bddeb6617c1743de946b3c5b4b0a465d85f35ce3 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -1390,13 +1390,11 @@ class DistributeTranspiler(object):
                 inputs={"X": vars2merge},
                 outputs={"Out": merged_var},
                 attrs={"use_mkldnn": False})
-            # TODO(panyx0718): What if it's SELECTED_ROWS.
-            if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS:
-                optimize_block.append_op(
-                    type="scale",
-                    inputs={"X": merged_var},
-                    outputs={"Out": merged_var},
-                    attrs={"scale": 1.0 / float(self.trainer_num)})
+            optimize_block.append_op(
+                type="scale",
+                inputs={"X": merged_var},
+                outputs={"Out": merged_var},
+                attrs={"scale": 1.0 / float(self.trainer_num)})
         return merged_var
 
     def _append_pserver_ops(self, optimize_block, opt_op, endpoint,
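The transpiler change makes the pserver always append a scale op after the sum, SELECTED_ROWS included, so the optimizer consumes the mean gradient across trainers rather than the raw sum; this is also why the expected op list in test_dist_transpiler.py gains an extra "scale" before "adam". A sketch of that intent (toy numbers, not the transpiler's API):

#include <cstdio>
#include <vector>

int main() {
  const int trainer_num = 4;
  std::vector<std::vector<float>> grads = {
      {0.4f, 0.8f}, {0.2f, 0.6f}, {0.4f, 0.2f}, {0.6f, 0.4f}};
  std::vector<float> merged(2, 0.f);
  for (const auto& g : grads)  // the "sum" op over per-trainer gradients
    for (size_t i = 0; i < g.size(); ++i) merged[i] += g[i];
  for (float& v : merged) v *= 1.0f / trainer_num;  // the "scale" op
  std::printf("mean grad = {%.2f, %.2f}\n", merged[0], merged[1]);  // 0.40 0.50
  return 0;
}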