Unverified commit 0a5a1c3b authored by saxon_zh and committed by GitHub

[cherry-pick]optimize some words in image_segmentation/high_level_api/getting_started (#2672)

* optimize some words in image_segmentation/high_level_api/getting_started

* fix image url error in image_segmentation
Parent d5308822
......@@ -34,7 +34,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 1,
"metadata": {},
"outputs": [
{
......@@ -43,7 +43,7 @@
"'2.0.0-beta0'"
]
},
"execution_count": 21,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
......@@ -58,6 +58,9 @@
"import paddle\n",
"from paddle.nn import functional as F\n",
"\n",
"device = paddle.set_device('gpu')\n",
"paddle.disable_static(device)\n",
"\n",
"paddle.__version__"
]
},
......@@ -173,7 +176,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
......@@ -235,7 +238,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
......@@ -388,7 +391,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
......@@ -464,7 +467,7 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 5,
"metadata": {
"colab": {},
"colab_type": "code",
......@@ -527,7 +530,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 6,
"metadata": {
"colab": {},
"colab_type": "code",
......@@ -587,7 +590,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 7,
"metadata": {
"colab": {},
"colab_type": "code",
......@@ -648,7 +651,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 8,
"metadata": {
"colab": {},
"colab_type": "code",
......@@ -724,7 +727,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
......@@ -743,28 +746,28 @@
"--------------------------------------------------------------------------------\n",
" Layer (type) Input Shape Output Shape Param #\n",
"================================================================================\n",
" Conv2d-38 [-1, 3, 160, 160] [-1, 32, 80, 80] 896\n",
" BatchNorm2d-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
" ReLU-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
" ReLU-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
" Conv2d-49 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152\n",
" Conv2d-50 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024\n",
"SeparableConv2d-17 [-1, 128, 20, 20] [-1, 256, 20, 20] 0\n",
" BatchNorm2d-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024\n",
" Conv2d-51 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304\n",
" Conv2d-52 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792\n",
"SeparableConv2d-18 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
" MaxPool2d-9 [-1, 256, 20, 20] [-1, 256, 10, 10] 0\n",
" Conv2d-53 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024\n",
" Encoder-9 [-1, 128, 20, 20] [-1, 256, 10, 10] 0\n",
" ReLU-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
"ConvTranspose2d-17 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464\n",
" BatchNorm2d-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
"ConvTranspose2d-18 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248\n",
" Upsample-8 [-1, 64, 80, 80] [-1, 64, 160, 160] 0\n",
" Conv2d-57 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080\n",
" Decoder-9 [-1, 64, 80, 80] [-1, 32, 160, 160] 0\n",
" Conv2d-58 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156\n",
" Conv2d-1 [-1, 3, 160, 160] [-1, 32, 80, 80] 896\n",
" BatchNorm2d-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
" ReLU-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
" ReLU-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
" Conv2d-12 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152\n",
" Conv2d-13 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024\n",
"SeparableConv2d-5 [-1, 128, 20, 20] [-1, 256, 20, 20] 0\n",
" BatchNorm2d-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024\n",
" Conv2d-14 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304\n",
" Conv2d-15 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792\n",
"SeparableConv2d-6 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
" MaxPool2d-3 [-1, 256, 20, 20] [-1, 256, 10, 10] 0\n",
" Conv2d-16 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024\n",
" Encoder-3 [-1, 128, 20, 20] [-1, 256, 10, 10] 0\n",
" ReLU-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
"ConvTranspose2d-7 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464\n",
" BatchNorm2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
"ConvTranspose2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248\n",
" Upsample-4 [-1, 64, 80, 80] [-1, 64, 160, 160] 0\n",
" Conv2d-20 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080\n",
" Decoder-4 [-1, 64, 80, 80] [-1, 32, 160, 160] 0\n",
" Conv2d-21 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156\n",
"================================================================================\n",
"Total params: 168,420\n",
"Trainable params: 167,140\n",
......@@ -784,7 +787,7 @@
"{'total_params': 168420, 'trainable_params': 167140}"
]
},
"execution_count": 31,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
......@@ -808,34 +811,6 @@
"## 5.模型训练"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "8Sskbyz58X4J"
},
"source": [
"### 5.1 配置信息\n",
"\n",
"定义训练BATCH_SIZE、训练轮次和计算设备等信息。"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "4fSkTiRB8OpP"
},
"outputs": [],
"source": [
"BATCH_SIZE = 32\n",
"EPOCHS = 15\n",
"device = paddle.set_device('gpu')\n",
"paddle.disable_static(device)"
]
},
{
"cell_type": "markdown",
"metadata": {
......@@ -843,14 +818,14 @@
"id": "x_vaedRa8eoy"
},
"source": [
"### 5.3 自定义Loss\n",
"### 5.1 自定义Loss\n",
"\n",
"在这个任务中我们使用SoftmaxWithCrossEntropy损失函数来做计算,飞桨中有functional形式的API,这里我们做一个自定义操作,实现一个Class形式API放到模型训练中使用。没有直接使用CrossEntropyLoss的原因主要是对计算维度的自定义需求,本次需要进行softmax计算的维度是1,不是默认的最后一维,所以我们采用上面提到的损失函数,通过axis参数来指定softmax计算维度。"
]
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 10,
"metadata": {
"colab": {},
"colab_type": "code",
......@@ -877,7 +852,7 @@
"id": "rj6MPPMkJIdZ"
},
"source": [
"### 5.4 启动模型训练\n",
"### 5.2 启动模型训练\n",
"\n",
"使用模型代码进行Model实例生成,使用prepare接口定义优化器、损失函数和评价指标等信息,用于后续训练使用。在所有初步配置完成后,调用fit接口开启训练执行过程,调用fit时只需要将前面定义好的训练数据集、测试数据集、训练轮次(Epoch)和批次大小(batch_size)配置好即可。"
]
......@@ -903,12 +878,12 @@
" epsilon=1e-07, \n",
" centered=False,\n",
" parameters=model.parameters())\n",
"model = paddle.Model(PetModel(num_classes))\n",
"model = paddle.Model(PetNet(num_classes))\n",
"model.prepare(optim, SoftmaxWithCrossEntropy())\n",
"model.fit(train_dataset, \n",
" val_dataset, \n",
" epochs=EPOCHS, \n",
" batch_size=BATCH_SIZE)"
" epochs=15, \n",
" batch_size=32)"
]
},
{
......
......@@ -26,6 +26,9 @@
import paddle
from paddle.nn import functional as F
device = paddle.set_device('gpu')
paddle.disable_static(device)
paddle.__version__
......@@ -326,7 +329,7 @@ DataLoader(多进程数据集加载)。
.. image:: https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png?raw=true
.. image:: https://raw.githubusercontent.com/PaddlePaddle/FluidDoc/develop/doc/paddle/tutorial/cv_case/image_segmentation/pets_image_segmentation_U_Net_like_files/pets_image_segmentation_U_Net_like_001.png
4. Model construction
......@@ -551,28 +554,28 @@ Layer类,整个过程是把\ ``filter_size * filter_size * num_filters``\ 的C
--------------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
================================================================================
Conv2d-38 [-1, 3, 160, 160] [-1, 32, 80, 80] 896
BatchNorm2d-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 128
ReLU-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 0
ReLU-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 0
Conv2d-49 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152
Conv2d-50 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024
SeparableConv2d-17 [-1, 128, 20, 20] [-1, 256, 20, 20] 0
BatchNorm2d-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024
Conv2d-51 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304
Conv2d-52 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792
SeparableConv2d-18 [-1, 256, 20, 20] [-1, 256, 20, 20] 0
MaxPool2d-9 [-1, 256, 20, 20] [-1, 256, 10, 10] 0
Conv2d-53 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024
Encoder-9 [-1, 128, 20, 20] [-1, 256, 10, 10] 0
ReLU-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 0
ConvTranspose2d-17 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464
BatchNorm2d-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 128
ConvTranspose2d-18 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248
Upsample-8 [-1, 64, 80, 80] [-1, 64, 160, 160] 0
Conv2d-57 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080
Decoder-9 [-1, 64, 80, 80] [-1, 32, 160, 160] 0
Conv2d-58 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156
Conv2d-1 [-1, 3, 160, 160] [-1, 32, 80, 80] 896
BatchNorm2d-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 128
ReLU-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 0
ReLU-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 0
Conv2d-12 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152
Conv2d-13 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024
SeparableConv2d-5 [-1, 128, 20, 20] [-1, 256, 20, 20] 0
BatchNorm2d-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024
Conv2d-14 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304
Conv2d-15 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792
SeparableConv2d-6 [-1, 256, 20, 20] [-1, 256, 20, 20] 0
MaxPool2d-3 [-1, 256, 20, 20] [-1, 256, 10, 10] 0
Conv2d-16 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024
Encoder-3 [-1, 128, 20, 20] [-1, 256, 10, 10] 0
ReLU-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 0
ConvTranspose2d-7 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464
BatchNorm2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 128
ConvTranspose2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248
Upsample-4 [-1, 64, 80, 80] [-1, 64, 160, 160] 0
Conv2d-20 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080
Decoder-4 [-1, 64, 80, 80] [-1, 32, 160, 160] 0
Conv2d-21 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156
================================================================================
Total params: 168,420
Trainable params: 167,140
......@@ -597,19 +600,7 @@ Layer类,整个过程是把\ ``filter_size * filter_size * num_filters``\ 的C
5. Model training
-----------------
5.1 Configuration
~~~~~~~~~~~~~~~~~
Define the training BATCH_SIZE, the number of training epochs, the compute device, and other settings.
.. code:: ipython3
BATCH_SIZE = 32
EPOCHS = 15
device = paddle.set_device('gpu')
paddle.disable_static(device)
5.3 Custom loss
5.1 Custom loss
~~~~~~~~~~~~~~~
In this task we use the SoftmaxWithCrossEntropy loss function. Paddle provides it as a functional-style API, and here we wrap it in a custom class-style API that can be plugged into model training. We do not use CrossEntropyLoss directly mainly because we need to customize the computation dimension: in this task the softmax must be computed along dimension 1 rather than the default last dimension, so we use the loss function mentioned above and specify the softmax dimension through its axis parameter.
......@@ -627,7 +618,7 @@ Layer类,整个过程是把\ ``filter_size * filter_size * num_filters``\ 的C
axis=1)
return paddle.mean(loss)
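For reference, a minimal sketch of the class-style loss that the paragraph above describes is shown below; only its last two lines appear in this diff, so the body is reconstructed around them using the functional API ``paddle.nn.functional.softmax_with_cross_entropy``, and details such as the ``return_softmax=False`` argument are assumptions rather than a verbatim copy of the tutorial code.

.. code:: python

    import paddle
    from paddle.nn import functional as F

    class SoftmaxWithCrossEntropy(paddle.nn.Layer):
        def __init__(self):
            super(SoftmaxWithCrossEntropy, self).__init__()

        def forward(self, input, label):
            # Compute softmax along axis=1 (the class/channel dimension),
            # not the default last axis, then reduce to a scalar loss.
            loss = F.softmax_with_cross_entropy(input,
                                                label,
                                                return_softmax=False,
                                                axis=1)
            return paddle.mean(loss)
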
5.4 Start model training
5.2 Start model training
~~~~~~~~~~~~~~~~~~~~~~~~
Instantiate a Model from the network code, then use the prepare interface to configure the optimizer, loss function, and evaluation metrics for the subsequent training. Once this initial configuration is done, call the fit interface to start the training run; when calling fit you only need to pass in the previously defined training dataset, test dataset, number of training epochs (Epoch), and batch size (batch_size).
......@@ -640,12 +631,12 @@ Layer类,整个过程是把\ ``filter_size * filter_size * num_filters``\ 的C
epsilon=1e-07,
centered=False,
parameters=model.parameters())
model = paddle.Model(PetModel(num_classes))
model = paddle.Model(PetNet(num_classes))
model.prepare(optim, SoftmaxWithCrossEntropy())
model.fit(train_dataset,
val_dataset,
epochs=EPOCHS,
batch_size=BATCH_SIZE)
epochs=15,
batch_size=32)
6. Model prediction
-------------------
......
......@@ -31,24 +31,22 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'2.0.0-beta0'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"2.0.0-beta0\n"
]
}
],
"source": [
"import paddle\n",
"\n",
"paddle.__version__"
"print(paddle.__version__)\n",
"paddle.disable_static()"
]
},
{
......@@ -71,7 +69,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
......@@ -90,7 +88,7 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
......@@ -113,7 +111,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 11,
"metadata": {
"scrolled": true
},
......@@ -123,15 +121,15 @@
"output_type": "stream",
"text": [
"Epoch 1/5\n",
"step 1875/1875 [==============================] - loss: 0.2571 - acc: 0.9037 - 10ms/step \n",
"step 1875/1875 [==============================] - loss: 0.2250 - acc: 0.9025 - 9ms/step \n",
"Epoch 2/5\n",
"step 1875/1875 [==============================] - loss: 0.1880 - acc: 0.9458 - 14ms/step \n",
"step 1875/1875 [==============================] - loss: 0.0969 - acc: 0.9462 - 13ms/step \n",
"Epoch 3/5\n",
"step 1875/1875 [==============================] - loss: 0.0279 - acc: 0.9549 - 11ms/step \n",
"step 1875/1875 [==============================] - loss: 0.1035 - acc: 0.9550 - 12ms/step \n",
"Epoch 4/5\n",
"step 1875/1875 [==============================] - loss: 0.0505 - acc: 0.9608 - 13ms/step \n",
"step 1875/1875 [==============================] - loss: 0.0316 - acc: 0.9603 - 12ms/step \n",
"Epoch 5/5\n",
"step 1875/1875 [==============================] - loss: 0.2253 - acc: 0.9646 - 12ms/step \n"
"step 1875/1875 [==============================] - loss: 0.1771 - acc: 0.9637 - 12ms/step \n"
]
}
],
......@@ -189,9 +187,7 @@
"source": [
"那么初步训练得到的模型效果在97%附近,我们可以进一步通过调整其中的训练参数来提升我们的模型精度。\n",
"\n",
"至此我们可以知道如何通过飞桨的几个简单API来快速完成一个深度学习任务,大家可以针对自己的需求来更换其中的代码,如果需要使用自己的数据集,那么可以更换数据集加载部分程序,如果需要替换模型,那么可以更改模型代码实现等等。我们也为大家提供了很多其他场景的示例代码来教大家如何使用我们的飞桨API,大家可以查看下面的链接或通过页面导航来查看自己感兴趣的部分。\n",
"\n",
"TODO:补充其他示例教程的快速链接。"
"至此我们可以知道如何通过飞桨的几个简单API来快速完成一个深度学习任务,大家可以针对自己的需求来更换其中的代码,如果需要使用自己的数据集,那么可以更换数据集加载部分程序,如果需要替换模型,那么可以更改模型代码实现等等。我们也为大家提供了很多其他场景的示例代码来教大家如何使用我们的飞桨API,大家可以查看下面的链接或通过页面导航来查看自己感兴趣的部分。"
]
}
],
......
......@@ -19,15 +19,13 @@
import paddle
paddle.__version__
print(paddle.__version__)
paddle.disable_static()
.. parsed-literal::
'2.0.0-beta0'
2.0.0-beta0
3. A hands-on handwritten digit recognition task
......@@ -91,15 +89,15 @@
.. parsed-literal::
Epoch 1/5
step 1875/1875 [==============================] - loss: 0.2571 - acc: 0.9037 - 10ms/step
step 1875/1875 [==============================] - loss: 0.2250 - acc: 0.9025 - 9ms/step
Epoch 2/5
step 1875/1875 [==============================] - loss: 0.1880 - acc: 0.9458 - 14ms/step
step 1875/1875 [==============================] - loss: 0.0969 - acc: 0.9462 - 13ms/step
Epoch 3/5
step 1875/1875 [==============================] - loss: 0.0279 - acc: 0.9549 - 11ms/step
step 1875/1875 [==============================] - loss: 0.1035 - acc: 0.9550 - 12ms/step
Epoch 4/5
step 1875/1875 [==============================] - loss: 0.0505 - acc: 0.9608 - 13ms/step
step 1875/1875 [==============================] - loss: 0.0316 - acc: 0.9603 - 12ms/step
Epoch 5/5
step 1875/1875 [==============================] - loss: 0.2253 - acc: 0.9646 - 12ms/step
step 1875/1875 [==============================] - loss: 0.1771 - acc: 0.9637 - 12ms/step
3.4 Model evaluation
......
......@@ -8,7 +8,7 @@
"\n",
"## 1. 简介\n",
"\n",
"飞桨2.0全新推出高层API,是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。\n",
"飞桨框架2.0全新推出高层API,是对飞桨API的进一步封装与升级,提供了更加简洁易用的API,进一步提升了飞桨的易学易用性,并增强飞桨的功能。\n",
"\n",
"飞桨高层API面向从深度学习小白到资深开发者的所有人群,对于AI初学者来说,使用高层API可以简单快速的构建深度学习项目,对于资深开发者来说,可以快速完成算法迭代。\n",
"\n",
......@@ -36,7 +36,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [
{
......@@ -45,7 +45,7 @@
"'2.0.0-beta0'"
]
},
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
......@@ -55,6 +55,9 @@
"import paddle.vision as vision\n",
"import paddle.text as text\n",
"\n",
"# 启动动态图训练模式\n",
"paddle.disable_static()\n",
"\n",
"paddle.__version__"
]
},
......@@ -90,7 +93,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 3,
"metadata": {
"tags": []
},
......@@ -118,7 +121,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
......@@ -140,7 +143,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 5,
"metadata": {
"tags": []
},
......@@ -207,8 +210,8 @@
" return len(self.data)\n",
"\n",
"# 测试定义的数据集\n",
"train_dataset = MyDataset(mode='train')\n",
"val_dataset = MyDataset(mode='test')\n",
"train_dataset_2 = MyDataset(mode='train')\n",
"val_dataset_2 = MyDataset(mode='test')\n",
"\n",
"print('=============train dataset=============')\n",
"for data, label in train_dataset:\n",
......@@ -232,7 +235,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
......@@ -243,7 +246,7 @@
"transform = Compose([ColorJitter(), Resize(size=100)])\n",
"\n",
"# 通过transform参数传递定义好的数据增项方法即可完成对自带数据集的应用\n",
"train_dataset = vision.datasets.MNIST(mode='train', transform=transform)"
"train_dataset_3 = vision.datasets.MNIST(mode='train', transform=transform)"
]
},
{
......@@ -257,7 +260,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
......@@ -316,7 +319,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
......@@ -340,7 +343,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
......@@ -364,7 +367,7 @@
"\n",
" return y\n",
"\n",
"mnist = Mnist()"
"mnist_2 = Mnist()"
]
},
{
......@@ -380,14 +383,12 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"# 场景1:动态图模式\n",
"\n",
"# 启动动态图训练模式\n",
"paddle.disable_static()\n",
"# 使用GPU训练\n",
"paddle.set_device('gpu')\n",
"# 模型封装\n",
......@@ -412,9 +413,45 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 33,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--------------------------------------------------------------------------------\n",
" Layer (type) Input Shape Output Shape Param #\n",
"================================================================================\n",
" Flatten-57509 [-1, 1, 28, 28] [-1, 784] 0\n",
" Linear-7 [-1, 784] [-1, 512] 401,920\n",
" ReLU-4 [-1, 512] [-1, 512] 0\n",
" Dropout-4 [-1, 512] [-1, 512] 0\n",
" Linear-8 [-1, 512] [-1, 10] 5,130\n",
"================================================================================\n",
"Total params: 407,050\n",
"Trainable params: 407,050\n",
"Non-trainable params: 0\n",
"--------------------------------------------------------------------------------\n",
"Input size (MB): 0.00\n",
"Forward/backward pass size (MB): 0.02\n",
"Params size (MB): 1.55\n",
"Estimated Total Size (MB): 1.57\n",
"--------------------------------------------------------------------------------\n",
"\n"
]
},
{
"data": {
"text/plain": [
"{'total_params': 407050, 'trainable_params': 407050}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.summary((1, 28, 28))"
]
......@@ -428,9 +465,45 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 34,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--------------------------------------------------------------------------------\n",
" Layer (type) Input Shape Output Shape Param #\n",
"================================================================================\n",
" Flatten-57508 [-1, 1, 28, 28] [-1, 784] 0\n",
" Linear-5 [-1, 784] [-1, 512] 401,920\n",
" ReLU-3 [-1, 512] [-1, 512] 0\n",
" Dropout-3 [-1, 512] [-1, 512] 0\n",
" Linear-6 [-1, 512] [-1, 10] 5,130\n",
"================================================================================\n",
"Total params: 407,050\n",
"Trainable params: 407,050\n",
"Non-trainable params: 0\n",
"--------------------------------------------------------------------------------\n",
"Input size (MB): 0.00\n",
"Forward/backward pass size (MB): 0.02\n",
"Params size (MB): 1.55\n",
"Estimated Total Size (MB): 1.57\n",
"--------------------------------------------------------------------------------\n",
"\n"
]
},
{
"data": {
"text/plain": [
"{'total_params': 407050, 'trainable_params': 407050}"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"paddle.summary(mnist, (1, 28, 28))"
]
......@@ -456,7 +529,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
......@@ -475,9 +548,36 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 36,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10\n",
"step 1875/1875 [==============================] - loss: 0.1600 - acc: 0.9022 - 10ms/step \n",
"Epoch 2/10\n",
"step 1875/1875 [==============================] - loss: 0.0455 - acc: 0.9461 - 12ms/step \n",
"Epoch 3/10\n",
"step 1875/1875 [==============================] - loss: 0.1429 - acc: 0.9544 - 19ms/step \n",
"Epoch 4/10\n",
"step 1875/1875 [==============================] - loss: 0.0197 - acc: 0.9601 - 22ms/step \n",
"Epoch 5/10\n",
"step 1875/1875 [==============================] - loss: 0.1762 - acc: 0.9644 - 25ms/step \n",
"Epoch 6/10\n",
"step 1875/1875 [==============================] - loss: 0.1304 - acc: 0.9667 - 22ms/step \n",
"Epoch 7/10\n",
"step 1875/1875 [==============================] - loss: 0.0133 - acc: 0.9682 - 22ms/step \n",
"Epoch 8/10\n",
"step 1875/1875 [==============================] - loss: 0.0097 - acc: 0.9705 - 19ms/step \n",
"Epoch 9/10\n",
"step 1875/1875 [==============================] - loss: 3.1264e-04 - acc: 0.9716 - 23ms/step \n",
"Epoch 10/10\n",
"step 1875/1875 [==============================] - loss: 0.0767 - acc: 0.9729 - 13ms/step \n"
]
}
],
"source": [
"# 启动模型训练,指定训练数据集,设置训练轮次,设置每次数据集计算的批次大小,设置日志格式\n",
"model.fit(train_dataset, \n",
......@@ -497,14 +597,39 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 20,
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10\n",
"step 1875/1875 [==============================] - loss: 0.0490 - acc: 0.9741 - 6ms/step \n",
"Epoch 2/10\n",
"step 1875/1875 [==============================] - loss: 0.1384 - acc: 0.9760 - 7ms/step \n",
"Epoch 3/10\n",
"step 1875/1875 [==============================] - loss: 0.0929 - acc: 0.9767 - 7ms/step \n",
"Epoch 4/10\n",
"step 1875/1875 [==============================] - loss: 0.0190 - acc: 0.9772 - 6ms/step \n",
"Epoch 5/10\n",
"step 1875/1875 [==============================] - loss: 0.0862 - acc: 0.9774 - 7ms/step \n",
"Epoch 6/10\n",
"step 1875/1875 [==============================] - loss: 0.0748 - acc: 0.9785 - 8ms/step \n",
"Epoch 7/10\n",
"step 1875/1875 [==============================] - loss: 0.0039 - acc: 0.9798 - 17ms/step \n",
"Epoch 8/10\n",
"step 1875/1875 [==============================] - loss: 0.0037 - acc: 0.9808 - 11ms/step \n",
"Epoch 9/10\n",
"step 1875/1875 [==============================] - loss: 0.0013 - acc: 0.9800 - 8ms/step \n",
"Epoch 10/10\n",
"step 1875/1875 [==============================] - loss: 0.0376 - acc: 0.9810 - 8ms/step \n"
]
}
],
"source": [
"# 启动动态图训练模式\n",
"paddle.disable_static()\n",
"\n",
"# 使用GPU训练\n",
"paddle.set_device('gpu')\n",
......@@ -530,17 +655,13 @@
"source": [
"### 5.2 单机多卡\n",
"\n",
"对于高层API来实现单机多卡非常简单,整个训练代码和单机单卡没有差异。直接使用`paddle.distributed.launch`启动单机单卡的程序即可。"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# train.py里面包含的就是单机单卡代码\n",
"python -m paddle.distributed.launch train.py"
"对于高层API来实现单机多卡非常简单,整个训练代码和单机单卡没有差异。直接使用`paddle.distributed.launch`启动单机单卡的程序即可。\n",
"\n",
"```bash\n",
"$ python -m paddle.distributed.launch train.py\n",
"```\n",
"\n",
"train.py里面包含的就是单机单卡代码"
]
},
{
......@@ -810,9 +931,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Eval begin...\n",
"step 10000/10000 [==============================] - loss: 0.0000e+00 - acc: 0.9801 - 2ms/step \n",
"Eval samples: 10000\n"
]
}
],
"source": [
"result = model.evaluate(val_dataset, verbose=1)"
]
......@@ -834,9 +965,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Predict begin...\n",
"step 10000/10000 [==============================] - 4ms/step \n",
"Predict samples: 10000\n"
]
}
],
"source": [
"pred_result = model.predict(val_dataset)"
]
......@@ -866,17 +1007,40 @@
"\n",
"### 8.1 模型存储\n",
"\n",
"模型训练和验证达到我们的预期后,可以使用`save`接口来将我们的模型保存下来,用于后续模型的Fine-tuning(接口参数training=True)或推理部署(接口参数training=False)。"
"模型训练和验证达到我们的预期后,可以使用`save`接口来将我们的模型保存下来,用于后续模型的Fine-tuning(接口参数training=True)或推理部署(接口参数training=False)。\n",
"\n",
"需要注意的是,在动态图模式训练时保存推理模型的参数文件和模型文件,需要在forward成员函数上添加@paddle.jit.to_static装饰器,参考下面的例子:\n",
"\n",
"```python\n",
"class Mnist(paddle.nn.Layer):\n",
" def __init__(self):\n",
" super(Mnist, self).__init__()\n",
"\n",
" self.flatten = paddle.nn.Flatten()\n",
" self.linear_1 = paddle.nn.Linear(784, 512)\n",
" self.linear_2 = paddle.nn.Linear(512, 10)\n",
" self.relu = paddle.nn.ReLU()\n",
" self.dropout = paddle.nn.Dropout(0.2)\n",
"\n",
" @paddle.jit.to_static\n",
" def forward(self, inputs):\n",
" y = self.flatten(inputs)\n",
" y = self.linear_1(y)\n",
" y = self.relu(y)\n",
" y = self.dropout(y)\n",
" y = self.linear_2(y)\n",
"\n",
" return y\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"# 保存用于推理部署的模型(training=False)\n",
"model.save('~/model/mnist', training=False)"
"model.save('~/model/mnist')"
]
},
{
......
......@@ -38,6 +38,9 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
import paddle.vision as vision
import paddle.text as text
# Enable dynamic graph training mode
paddle.disable_static()
paddle.__version__
......@@ -145,8 +148,8 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
return len(self.data)
# Test the custom dataset
train_dataset = MyDataset(mode='train')
val_dataset = MyDataset(mode='test')
train_dataset_2 = MyDataset(mode='train')
val_dataset_2 = MyDataset(mode='test')
print('=============train dataset=============')
for data, label in train_dataset:
......@@ -188,7 +191,7 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
transform = Compose([ColorJitter(), Resize(size=100)])
# Pass the defined data augmentation through the transform parameter to apply it to the built-in dataset
train_dataset = vision.datasets.MNIST(mode='train', transform=transform)
train_dataset_3 = vision.datasets.MNIST(mode='train', transform=transform)
3.3.2 Custom dataset
^^^^^^^^^^^^^^^^^^^^
......@@ -284,7 +287,7 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
return y
mnist = Mnist()
mnist_2 = Mnist()
4.3 Model wrapping
~~~~~~~~~~~~~~~~~~
......@@ -297,8 +300,6 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
# Scenario 1: dynamic graph mode
# Enable dynamic graph training mode
paddle.disable_static()
# Train on GPU
paddle.set_device('gpu')
# Wrap the model
......@@ -320,12 +321,76 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
model.summary((1, 28, 28))
.. parsed-literal::
--------------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
================================================================================
Flatten-57509 [-1, 1, 28, 28] [-1, 784] 0
Linear-7 [-1, 784] [-1, 512] 401,920
ReLU-4 [-1, 512] [-1, 512] 0
Dropout-4 [-1, 512] [-1, 512] 0
Linear-8 [-1, 512] [-1, 10] 5,130
================================================================================
Total params: 407,050
Trainable params: 407,050
Non-trainable params: 0
--------------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.02
Params size (MB): 1.55
Estimated Total Size (MB): 1.57
--------------------------------------------------------------------------------
.. parsed-literal::
{'total_params': 407050, 'trainable_params': 407050}
In addition, the summary interface can be used in two ways, as the examples here show. Besides ``Model.summary``, which is used together with the ``paddle.Model`` wrapper, there is also a form for networks that have not been wrapped in ``paddle.Model``: an instantiated Layer subclass can be passed directly to the ``paddle.summary`` interface to visualize its structure.
.. code:: ipython3
paddle.summary(mnist, (1, 28, 28))
.. parsed-literal::
--------------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
================================================================================
Flatten-57508 [-1, 1, 28, 28] [-1, 784] 0
Linear-5 [-1, 784] [-1, 512] 401,920
ReLU-3 [-1, 512] [-1, 512] 0
Dropout-3 [-1, 512] [-1, 512] 0
Linear-6 [-1, 512] [-1, 10] 5,130
================================================================================
Total params: 407,050
Trainable params: 407,050
Non-trainable params: 0
--------------------------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.02
Params size (MB): 1.55
Estimated Total Size (MB): 1.57
--------------------------------------------------------------------------------
.. parsed-literal::
{'total_params': 407050, 'trainable_params': 407050}
One point worth noting: some users may wonder why the input_size argument ``(1, 28, 28)`` has to be passed. In dynamic graph mode the input shape is not yet known when the network is defined, so there is nothing to base a structure report on; by telling the interface the shape of the input data, the network can derive the complete structure through layer-by-layer computation and display it. If the model is wrapped for static graph execution, there is no need to pass the input shape to the summary interface, because an InputSpec, which already contains the input shape, was defined when the model was wrapped.
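As a reference, a minimal sketch of that InputSpec-style wrapping (not part of the original document) is shown below; ``paddle.static.InputSpec``, the shapes used, and the reuse of the ``Mnist`` Layer subclass defined earlier in this guide are assumptions for illustration.

.. code:: python

    import paddle
    from paddle.static import InputSpec

    # Declare input/label specs up front so the wrapped Model already knows
    # the data shapes; summary() then needs no input_size argument.
    inputs = InputSpec([None, 1, 28, 28], 'float32', 'image')
    labels = InputSpec([None, 1], 'int64', 'label')

    model = paddle.Model(Mnist(), inputs, labels)
    model.summary()
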
5. Model training
......@@ -352,6 +417,31 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
batch_size=32,
verbose=1)
.. parsed-literal::
Epoch 1/10
step 1875/1875 [==============================] - loss: 0.1600 - acc: 0.9022 - 10ms/step
Epoch 2/10
step 1875/1875 [==============================] - loss: 0.0455 - acc: 0.9461 - 12ms/step
Epoch 3/10
step 1875/1875 [==============================] - loss: 0.1429 - acc: 0.9544 - 19ms/step
Epoch 4/10
step 1875/1875 [==============================] - loss: 0.0197 - acc: 0.9601 - 22ms/step
Epoch 5/10
step 1875/1875 [==============================] - loss: 0.1762 - acc: 0.9644 - 25ms/step
Epoch 6/10
step 1875/1875 [==============================] - loss: 0.1304 - acc: 0.9667 - 22ms/step
Epoch 7/10
step 1875/1875 [==============================] - loss: 0.0133 - acc: 0.9682 - 22ms/step
Epoch 8/10
step 1875/1875 [==============================] - loss: 0.0097 - acc: 0.9705 - 19ms/step
Epoch 9/10
step 1875/1875 [==============================] - loss: 3.1264e-04 - acc: 0.9716 - 23ms/step
Epoch 10/10
step 1875/1875 [==============================] - loss: 0.0767 - acc: 0.9729 - 13ms/step
5.1 Single machine, single GPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
......@@ -359,8 +449,6 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
.. code:: ipython3
# Enable dynamic graph training mode
paddle.disable_static()
# Train on GPU
paddle.set_device('gpu')
......@@ -379,15 +467,41 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
batch_size=32,
verbose=1)
.. parsed-literal::
Epoch 1/10
step 1875/1875 [==============================] - loss: 0.0490 - acc: 0.9741 - 6ms/step
Epoch 2/10
step 1875/1875 [==============================] - loss: 0.1384 - acc: 0.9760 - 7ms/step
Epoch 3/10
step 1875/1875 [==============================] - loss: 0.0929 - acc: 0.9767 - 7ms/step
Epoch 4/10
step 1875/1875 [==============================] - loss: 0.0190 - acc: 0.9772 - 6ms/step
Epoch 5/10
step 1875/1875 [==============================] - loss: 0.0862 - acc: 0.9774 - 7ms/step
Epoch 6/10
step 1875/1875 [==============================] - loss: 0.0748 - acc: 0.9785 - 8ms/step
Epoch 7/10
step 1875/1875 [==============================] - loss: 0.0039 - acc: 0.9798 - 17ms/step
Epoch 8/10
step 1875/1875 [==============================] - loss: 0.0037 - acc: 0.9808 - 11ms/step
Epoch 9/10
step 1875/1875 [==============================] - loss: 0.0013 - acc: 0.9800 - 8ms/step
Epoch 10/10
step 1875/1875 [==============================] - loss: 0.0376 - acc: 0.9810 - 8ms/step
5.2 Single machine, multiple GPUs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implementing single-machine multi-GPU training with the high-level API is very simple: the training code is exactly the same as the single-GPU version. Just use ``paddle.distributed.launch`` to launch the single-GPU program.
.. code:: ipython3
.. code:: bash
$ python -m paddle.distributed.launch train.py
# train.py contains the single-machine single-GPU code
python -m paddle.distributed.launch train.py
train.py contains the single-machine single-GPU code.
5.3 Custom loss
~~~~~~~~~~~~~~~
......@@ -640,6 +754,14 @@ paddle即可使用相关高层API,如:paddle.Model、视觉领域paddle.visi
result = model.evaluate(val_dataset, verbose=1)
.. parsed-literal::
Eval begin...
step 10000/10000 [==============================] - loss: 0.0000e+00 - acc: 0.9801 - 2ms/step
Eval samples: 10000
7. Model prediction
-------------------
......@@ -657,6 +779,14 @@ numpy_ndarray_n是对应原始数据经过模型计算后得到的预测数据
pred_result = model.predict(val_dataset)
.. parsed-literal::
Predict begin...
step 10000/10000 [==============================] - 4ms/step
Predict samples: 10000
7.1 Multi-GPU prediction
~~~~~~~~~~~~~~~~~~~~~~~~
......@@ -678,10 +808,33 @@ infer.py里面就是包含model.predict的代码程序。
Once model training and validation meet our expectations, the ``save`` interface can be used to save the model, either for later fine-tuning (interface parameter training=True) or for inference deployment (interface parameter training=False).
Note that to save the parameter file and model file for inference when training in dynamic graph mode, the @paddle.jit.to_static decorator must be added to the forward member function; see the following example:
.. code:: python
class Mnist(paddle.nn.Layer):
def __init__(self):
super(Mnist, self).__init__()
self.flatten = paddle.nn.Flatten()
self.linear_1 = paddle.nn.Linear(784, 512)
self.linear_2 = paddle.nn.Linear(512, 10)
self.relu = paddle.nn.ReLU()
self.dropout = paddle.nn.Dropout(0.2)
@paddle.jit.to_static
def forward(self, inputs):
y = self.flatten(inputs)
y = self.linear_1(y)
y = self.relu(y)
y = self.dropout(y)
y = self.linear_2(y)
return y
.. code:: ipython3
# Save the model for inference deployment (training=False)
model.save('~/model/mnist', training=False)
model.save('~/model/mnist')
8.2 Inference deployment
~~~~~~~~~~~~~~~~~~~~~~~~
......