diff --git a/paddle2.0_docs/getting_started/getting_started.ipynb b/paddle2.0_docs/getting_started/getting_started.ipynb
index f2a27f4d2d297a037ab338093856eec453590370..33b2afdc358a11006a32e47766c3341d94894920 100644
--- a/paddle2.0_docs/getting_started/getting_started.ipynb
+++ b/paddle2.0_docs/getting_started/getting_started.ipynb
@@ -31,16 +31,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "'0.0.0'"
+       "'2.0.0-beta0'"
       ]
      },
-     "execution_count": 4,
+     "execution_count": 1,
      "metadata": {},
      "output_type": "execute_result"
     }
diff --git a/paddle2.0_docs/high_level_api/high_level_api.ipynb b/paddle2.0_docs/high_level_api/high_level_api.ipynb
index 9e883deef893695bffd3173c7e8d979fbcc87cad..feeb7068ee718b97632546f2e6af1dcfe5b6878b 100644
--- a/paddle2.0_docs/high_level_api/high_level_api.ipynb
+++ b/paddle2.0_docs/high_level_api/high_level_api.ipynb
@@ -1,25 +1,4 @@
 {
- "metadata": {
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.4-final"
-  },
-  "orig_nbformat": 2,
-  "kernelspec": {
-   "name": "python37464bitc4da1ac836094043840bff631bedbf7f",
-   "display_name": "Python 3.7.4 64-bit"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2,
  "cells": [
   {
    "cell_type": "markdown",
@@ -57,16 +36,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [
     {
-     "output_type": "execute_result",
      "data": {
-      "text/plain": "'0.0.0'"
+      "text/plain": [
+       "'2.0.0-beta0'"
+      ]
      },
+     "execution_count": 1,
      "metadata": {},
-     "execution_count": 2
+     "output_type": "execute_result"
     }
    ],
    "source": [
@@ -115,9 +96,12 @@
   },
   "outputs": [
    {
-    "output_type": "stream",
     "name": "stdout",
-    "text": "视觉相关数据集: ['DatasetFolder', 'ImageFolder', 'MNIST', 'Flowers', 'Cifar10', 'Cifar100', 'VOC2012']\n自然语言相关数据集: ['Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'MovieReviews', 'UCIHousing', 'WMT14', 'WMT16']\n"
+    "output_type": "stream",
+    "text": [
+     "视觉相关数据集: ['DatasetFolder', 'ImageFolder', 'MNIST', 'Flowers', 'Cifar10', 'Cifar100', 'VOC2012']\n",
+     "自然语言相关数据集: ['Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'MovieReviews', 'UCIHousing', 'WMT14', 'WMT16']\n"
+    ]
    }
   ],
   "source": [
@@ -138,7 +122,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# 测试数据集\n",
+    "# 训练数据集\n",
    "train_dataset = vision.datasets.MNIST(mode='train')\n",
    "\n",
    "# 验证数据集\n",
@@ -162,9 +146,20 @@
   },
   "outputs": [
    {
-    "output_type": "stream",
     "name": "stdout",
-    "text": "=============train dataset=============\ntraindata1 label1\ntraindata2 label2\ntraindata3 label3\ntraindata4 label4\n=============evaluation dataset=============\ntestdata1 label1\ntestdata2 label2\ntestdata3 label3\ntestdata4 label4\n"
+    "output_type": "stream",
+    "text": [
+     "=============train dataset=============\n",
+     "traindata1 label1\n",
+     "traindata2 label2\n",
+     "traindata3 label3\n",
+     "traindata4 label4\n",
+     "=============evaluation dataset=============\n",
+     "testdata1 label1\n",
+     "testdata2 label2\n",
+     "testdata3 label3\n",
+     "testdata4 label4\n"
+    ]
    }
   ],
   "source": [
@@ -893,5 +888,26 @@
    "有了用于推理部署的模型,就可以使用推理部署框架来完成预测服务部署,具体可以参见:[预测部署](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/index_cn.html), 包括服务端部署、移动端部署和模型压缩。"
   ]
  }
- ]
-}
\ No newline at end of file
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.7.4 64-bit",
+   "language": "python",
+   "name": "python37464bitc4da1ac836094043840bff631bedbf7f"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/paddle2.0_docs/image_segmentation/pets_image_segmentation_U_Net_like.ipynb b/paddle2.0_docs/image_segmentation/pets_image_segmentation_U_Net_like.ipynb
index d439475e1614d1a7d04791c7ac1d1395586dc87f..9dd56ed42fb192d5bd9309c6aa64901e508c7214 100644
--- a/paddle2.0_docs/image_segmentation/pets_image_segmentation_U_Net_like.ipynb
+++ b/paddle2.0_docs/image_segmentation/pets_image_segmentation_U_Net_like.ipynb
@@ -7,7 +7,7 @@
    "id": "ueGUN2EQeScw"
   },
   "source": [
-    "# 基于U型语义分割模型实现的宠物图像分割\n",
+    "# 基于U-Net卷积神经网络实现宠物图像分割\n",
    "\n",
    "本示例教程当前是基于2.0-beta版本Paddle做的案例实现,未来会随着2.0的系列版本发布进行升级。"
   ]
@@ -34,16 +34,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 21,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "'0.0.0'"
+       "'2.0.0-beta0'"
       ]
      },
-     "execution_count": 2,
+     "execution_count": 21,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -94,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@@ -105,7 +105,20 @@
    "outputId": "3985783f-7166-4afa-f511-16427b3e2a71",
    "tags": []
   },
-  "outputs": [],
+  "outputs": [
+   {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     " % Total % Received % Xferd Average Speed Time Time Time Current\n",
+     " Dload Upload Total Spent Left Speed\n",
+     "100 755M 100 755M 0 0 1707k 0 0:07:32 0:07:32 --:--:-- 2865k0 0:12:48 524k 0 0:13:34 0:02:41 0:10:53 668k 0 0:12:45 0:03:06 0:09:39 1702k 0 1221k 0 0:10:33 0:03:25 0:07:08 3108k37 282M 0 0 1243k 0 0:10:21 0:03:52 0:06:29 719k0:05:53 566k0 1237k 0 0:10:25 0:04:43 0:05:42 1593k 0 0:09:46 0:05:28 0:04:18 2952k 1467k 0 0:08:47 0:06:43 0:02:04 1711k\n",
+     " % Total % Received % Xferd Average Speed Time Time Time Current\n",
+     " Dload Upload Total Spent Left Speed\n",
+     "100 18.2M 100 18.2M 0 0 1602k 0 0:00:11 0:00:11 --:--:-- 3226k\n"
+    ]
+   }
+  ],
   "source": [
    "!curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\n",
    "!curl -O http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz\n",
@@ -160,7 +173,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 22,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@@ -222,7 +235,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 23,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -375,7 +388,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 24,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@@ -451,7 +464,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 25,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -514,7 +527,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 26,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -574,7 +587,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 27,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -596,7 +609,7 @@
    " kernel_size=3, \n",
    " padding='same')\n",
    " self.bn = paddle.nn.BatchNorm2d(out_channels)\n",
-    " self.upsample = paddle.nn.UpSample(scale_factor=2.0)\n",
+    " self.upsample = paddle.nn.Upsample(scale_factor=2.0)\n",
    " self.residual_conv = paddle.nn.Conv2d(in_channels, \n",
    " out_channels, \n",
    " kernel_size=1, \n",
@@ -635,7 +648,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 30,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -643,9 +656,9 @@
   },
   "outputs": [],
   "source": [
-    "class PetModel(paddle.nn.Layer):\n",
+    "class PetNet(paddle.nn.Layer):\n",
    " def __init__(self, num_classes):\n",
-    " super(PetModel, self).__init__()\n",
+    " super(PetNet, self).__init__()\n",
    "\n",
    " self.conv_1 = paddle.nn.Conv2d(3, 32, \n",
    " kernel_size=3,\n",
@@ -711,7 +724,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 31,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@@ -730,32 +743,32 @@
    "--------------------------------------------------------------------------------\n",
    " Layer (type) Input Shape Output Shape Param #\n",
    "================================================================================\n",
-    " Conv2d-1 [-1, 3, 160, 160] [-1, 32, 80, 80] 896\n",
-    " BatchNorm2d-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 64\n",
-    " ReLU-1 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
-    " ReLU-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
-    " Conv2d-12 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152\n",
-    " Conv2d-13 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024\n",
-    "SeparableConv2d-5 [-1, 128, 20, 20] [-1, 256, 20, 20] 0\n",
-    " BatchNorm2d-4 [-1, 256, 20, 20] [-1, 256, 20, 20] 512\n",
-    " Conv2d-14 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304\n",
-    " Conv2d-15 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792\n",
-    "SeparableConv2d-6 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
-    " MaxPool2d-3 [-1, 256, 20, 20] [-1, 256, 10, 10] 0\n",
-    " Conv2d-16 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024\n",
-    " Encoder-3 [-1, 128, 20, 20] [-1, 256, 10, 10] 0\n",
-    " ReLU-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
-    "ConvTranspose2d-7 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464\n",
-    " BatchNorm2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 64\n",
-    "ConvTranspose2d-8 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248\n",
-    " UpSample-4 [-1, 64, 80, 80] [-1, 64, 160, 160] 0\n",
-    " Conv2d-20 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080\n",
-    " Decoder-4 [-1, 64, 80, 80] [-1, 32, 160, 160] 0\n",
-    " Conv2d-21 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156\n",
+    " Conv2d-38 [-1, 3, 160, 160] [-1, 32, 80, 80] 896\n",
+    " BatchNorm2d-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
+    " ReLU-14 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
+    " ReLU-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
+    " Conv2d-49 [-1, 128, 20, 20] [-1, 128, 20, 20] 1,152\n",
+    " Conv2d-50 [-1, 128, 20, 20] [-1, 256, 20, 20] 33,024\n",
+    "SeparableConv2d-17 [-1, 128, 20, 20] [-1, 256, 20, 20] 0\n",
+    " BatchNorm2d-17 [-1, 256, 20, 20] [-1, 256, 20, 20] 1,024\n",
+    " Conv2d-51 [-1, 256, 20, 20] [-1, 256, 20, 20] 2,304\n",
+    " Conv2d-52 [-1, 256, 20, 20] [-1, 256, 20, 20] 65,792\n",
+    "SeparableConv2d-18 [-1, 256, 20, 20] [-1, 256, 20, 20] 0\n",
+    " MaxPool2d-9 [-1, 256, 20, 20] [-1, 256, 10, 10] 0\n",
+    " Conv2d-53 [-1, 128, 20, 20] [-1, 256, 10, 10] 33,024\n",
+    " Encoder-9 [-1, 128, 20, 20] [-1, 256, 10, 10] 0\n",
+    " ReLU-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 0\n",
+    "ConvTranspose2d-17 [-1, 64, 80, 80] [-1, 32, 80, 80] 18,464\n",
+    " BatchNorm2d-21 [-1, 32, 80, 80] [-1, 32, 80, 80] 128\n",
+    "ConvTranspose2d-18 [-1, 32, 80, 80] [-1, 32, 80, 80] 9,248\n",
+    " Upsample-8 [-1, 64, 80, 80] [-1, 64, 160, 160] 0\n",
+    " Conv2d-57 [-1, 64, 160, 160] [-1, 32, 160, 160] 2,080\n",
+    " Decoder-9 [-1, 64, 80, 80] [-1, 32, 160, 160] 0\n",
+    " Conv2d-58 [-1, 32, 160, 160] [-1, 4, 160, 160] 1,156\n",
    "================================================================================\n",
-    "Total params: 167,780\n",
-    "Trainable params: 167,780\n",
-    "Non-trainable params: 0\n",
+    "Total params: 168,420\n",
+    "Trainable params: 167,140\n",
+    "Non-trainable params: 1,280\n",
    "--------------------------------------------------------------------------------\n",
    "Input size (MB): 0.29\n",
    "Forward/backward pass size (MB): 43.16\n",
@@ -768,10 +781,10 @@
     {
      "data": {
       "text/plain": [
-       "{'total_params': 167780, 'trainable_params': 167780}"
+       "{'total_params': 168420, 'trainable_params': 167140}"
       ]
      },
-     "execution_count": 10,
+     "execution_count": 31,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -781,7 +794,7 @@
    "\n",
    "paddle.disable_static()\n",
    "num_classes = 4\n",
-    "model = paddle.Model(PetModel(num_classes))\n",
+    "model = paddle.Model(PetNet(num_classes))\n",
    "model.summary((3, 160, 160))"
   ]
  },
@@ -809,7 +822,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 18,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -837,7 +850,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 19,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -1022,5 +1035,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 4
 }
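
For reference, the decoder block touched by the UpSample -> Upsample rename above can be sketched as follows. This is a minimal illustration only: the constructor mirrors the lines visible in the diff, the forward() wiring is an assumption added for clarity, and the layer names follow the 2.0-beta API used in these notebooks (paddle.nn.Conv2d / paddle.nn.BatchNorm2d), which later Paddle releases spell Conv2D / BatchNorm2D.

import paddle

# Minimal sketch of a decoder-style block, assuming the Paddle 2.0-beta layer
# names shown in the diff (Conv2d, BatchNorm2d, Upsample). The forward() wiring
# below is illustrative, not the notebook's exact implementation.
class Decoder(paddle.nn.Layer):
    def __init__(self, in_channels, out_channels):
        super(Decoder, self).__init__()
        self.relu = paddle.nn.ReLU()
        self.conv = paddle.nn.Conv2d(in_channels, out_channels,
                                     kernel_size=3,
                                     padding='same')
        self.bn = paddle.nn.BatchNorm2d(out_channels)
        # Correct capitalization: paddle.nn.Upsample, not UpSample.
        self.upsample = paddle.nn.Upsample(scale_factor=2.0)
        self.residual_conv = paddle.nn.Conv2d(in_channels, out_channels,
                                              kernel_size=1,
                                              padding='same')

    def forward(self, inputs):
        y = self.relu(inputs)
        y = self.conv(y)
        y = self.bn(y)
        y = self.upsample(y)               # main path: [N, out, 2H, 2W]
        residual = self.upsample(inputs)   # skip path upsampled to match
        residual = self.residual_conv(residual)
        return y + residual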