From 54a9ba959f2f1ea530107892c66e7eeae32728ed Mon Sep 17 00:00:00 2001
From: ceci3 <592712189@qq.com>
Date: Sat, 12 Oct 2019 14:26:20 +0000
Subject: [PATCH] -1->None

---
 01.fit_a_line/README.cn.md        | 4 ++--
 01.fit_a_line/README.md           | 4 ++--
 01.fit_a_line/index.cn.html       | 4 ++--
 01.fit_a_line/index.html          | 4 ++--
 01.fit_a_line/train.py            | 4 ++--
 02.recognize_digits/README.cn.md  | 8 ++++----
 02.recognize_digits/README.md     | 8 ++++----
 02.recognize_digits/index.cn.html | 8 ++++----
 02.recognize_digits/index.html    | 8 ++++----
 02.recognize_digits/train.py      | 4 ++--
 09.gan/README.cn.md               | 8 ++++----
 09.gan/dc_gan.py                  | 8 ++++----
 09.gan/index.cn.html              | 8 ++++----
 13 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/01.fit_a_line/README.cn.md b/01.fit_a_line/README.cn.md
index 715bc98..69cc494 100644
--- a/01.fit_a_line/README.cn.md
+++ b/01.fit_a_line/README.cn.md
@@ -194,8 +194,8 @@ test_reader = paddle.batch(
 训练程序的目的是定义一个训练模型的网络结构。对于线性回归来讲,它就是一个从输入到输出的简单的全连接层。更加复杂的结果,比如卷积神经网络,递归神经网络等会在随后的章节中介绍。训练程序必须返回`平均损失`作为第一个返回值,因为它会被后面反向传播算法所用到。

 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # 定义输入的形状和数据类型
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # 定义输出的形状和数据类型
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # 定义输入的形状和数据类型
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # 定义输出的形状和数据类型
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # 连接输入和输出的全连接层

 main_program = fluid.default_main_program() # 获取默认/全局主函数
diff --git a/01.fit_a_line/README.md b/01.fit_a_line/README.md
index 29fdc1a..8760bbf 100644
--- a/01.fit_a_line/README.md
+++ b/01.fit_a_line/README.md
@@ -196,8 +196,8 @@ test_reader = paddle.batch(
 The aim of the program for training is to define a network structure of a training model. For linear regression, it is a simple fully connected layer from input to output. More complex result, such as Convolutional Neural Network and Recurrent Neural Network, will be introduced in later chapters. It must return `mean error` as the first return value in program for training, for that `mean error` will be used for BackPropagation.

 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output

 main_program = fluid.default_main_program() # get default/global main function
diff --git a/01.fit_a_line/index.cn.html b/01.fit_a_line/index.cn.html
index 40d9128..e47cda5 100644
--- a/01.fit_a_line/index.cn.html
+++ b/01.fit_a_line/index.cn.html
@@ -236,8 +236,8 @@ test_reader = paddle.batch(
 训练程序的目的是定义一个训练模型的网络结构。对于线性回归来讲,它就是一个从输入到输出的简单的全连接层。更加复杂的结果,比如卷积神经网络,递归神经网络等会在随后的章节中介绍。训练程序必须返回`平均损失`作为第一个返回值,因为它会被后面反向传播算法所用到。

 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # 定义输入的形状和数据类型
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # 定义输出的形状和数据类型
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # 定义输入的形状和数据类型
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # 定义输出的形状和数据类型
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # 连接输入和输出的全连接层

 main_program = fluid.default_main_program() # 获取默认/全局主函数
diff --git a/01.fit_a_line/index.html b/01.fit_a_line/index.html
index 184e825..70e7b0e 100644
--- a/01.fit_a_line/index.html
+++ b/01.fit_a_line/index.html
@@ -238,8 +238,8 @@ test_reader = paddle.batch(
 The aim of the program for training is to define a network structure of a training model. For linear regression, it is a simple fully connected layer from input to output. More complex result, such as Convolutional Neural Network and Recurrent Neural Network, will be introduced in later chapters. It must return `mean error` as the first return value in program for training, for that `mean error` will be used for BackPropagation.

 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output

 main_program = fluid.default_main_program() # get default/global main function
diff --git a/01.fit_a_line/train.py b/01.fit_a_line/train.py
index 2ebe21e..e708898 100644
--- a/01.fit_a_line/train.py
+++ b/01.fit_a_line/train.py
@@ -87,8 +87,8 @@ def main():
         batch_size=batch_size)

     # feature vector of length 13
-    x = fluid.data(name='x', shape=[-1, 13], dtype='float32')
-    y = fluid.data(name='y', shape=[-1, 1], dtype='float32')
+    x = fluid.data(name='x', shape=[None, 13], dtype='float32')
+    y = fluid.data(name='y', shape=[None, 1], dtype='float32')

     main_program = fluid.default_main_program()
     startup_program = fluid.default_startup_program()
diff --git a/02.recognize_digits/README.cn.md b/02.recognize_digits/README.cn.md
index 31c2adb..b362119 100644
--- a/02.recognize_digits/README.cn.md
+++ b/02.recognize_digits/README.cn.md
@@ -209,7 +209,7 @@ def softmax_regression():
         predict_image -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 以softmax为激活函数的全连接层,输出层的大小必须为数字的个数10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -229,7 +229,7 @@ def multilayer_perceptron():
         predict_image -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 第一个全连接层,激活函数为ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # 第二个全连接层,激活函数为ReLU
@@ -251,7 +251,7 @@ def convolutional_neural_network():
         predict -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 第一个卷积-池化层
     # 使用20个5*5的滤波器,池化大小为2,池化步长为2,激活函数为Relu
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -296,7 +296,7 @@ def train_program():
     """

     # 标签层,名称为label,对应输入图片的类别标签
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

     # predict = softmax_regression() # 取消注释将使用 Softmax回归
     # predict = multilayer_perceptron() # 取消注释将使用 多层感知器
diff --git a/02.recognize_digits/README.md b/02.recognize_digits/README.md
index b3ca3c8..47691ef 100644
--- a/02.recognize_digits/README.md
+++ b/02.recognize_digits/README.md
@@ -188,7 +188,7 @@ def softmax_regression():
         predict_image -- result of classification
     """
     # input original image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # With softmax as the fully connected layer of the activation function, the size of the output layer must be 10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -208,7 +208,7 @@ def multilayer_perceptron():
         predict_image -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, whose activation function is ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, whose activation function is ReLU
@@ -230,7 +230,7 @@ def convolutional_neural_network():
         predict -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # Use 20 5*5 filters, the pooling size is 2, the pooling step is 2, and the activation function is Relu.
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -275,7 +275,7 @@ def train_program():
     """

     # label layer, called label, correspondent with label category of input picture
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

     # predict = softmax_regression() # cancel note and run Softmax regression
     # predict = multilayer_perceptron() # cancel note and run multiple perceptron
diff --git a/02.recognize_digits/index.cn.html b/02.recognize_digits/index.cn.html
index 3288152..d3f82cd 100644
--- a/02.recognize_digits/index.cn.html
+++ b/02.recognize_digits/index.cn.html
@@ -251,7 +251,7 @@ def softmax_regression():
         predict_image -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 以softmax为激活函数的全连接层,输出层的大小必须为数字的个数10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -271,7 +271,7 @@ def multilayer_perceptron():
         predict_image -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 第一个全连接层,激活函数为ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # 第二个全连接层,激活函数为ReLU
@@ -293,7 +293,7 @@ def convolutional_neural_network():
         predict -- 分类的结果
     """
     # 输入的原始图像数据,大小为28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # 第一个卷积-池化层
     # 使用20个5*5的滤波器,池化大小为2,池化步长为2,激活函数为Relu
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -338,7 +338,7 @@ def train_program():
     """

     # 标签层,名称为label,对应输入图片的类别标签
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

     # predict = softmax_regression() # 取消注释将使用 Softmax回归
     # predict = multilayer_perceptron() # 取消注释将使用 多层感知器
diff --git a/02.recognize_digits/index.html b/02.recognize_digits/index.html
index 9ab5b1e..04ff588 100644
--- a/02.recognize_digits/index.html
+++ b/02.recognize_digits/index.html
@@ -230,7 +230,7 @@ def softmax_regression():
         predict_image -- result of classification
     """
     # input original image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # With softmax as the fully connected layer of the activation function, the size of the output layer must be 10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -250,7 +250,7 @@ def multilayer_perceptron():
         predict_image -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, whose activation function is ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, whose activation function is ReLU
@@ -272,7 +272,7 @@ def convolutional_neural_network():
         predict -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # Use 20 5*5 filters, the pooling size is 2, the pooling step is 2, and the activation function is Relu.
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -317,7 +317,7 @@ def train_program():
     """

     # label layer, called label, correspondent with label category of input picture
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

     # predict = softmax_regression() # cancel note and run Softmax regression
     # predict = multilayer_perceptron() # cancel note and run multiple perceptron
diff --git a/02.recognize_digits/train.py b/02.recognize_digits/train.py
index 27cb3c7..1d1fa9a 100644
--- a/02.recognize_digits/train.py
+++ b/02.recognize_digits/train.py
@@ -101,8 +101,8 @@ def train(nn_type,
     test_reader = paddle.batch(
         paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)

-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

     if nn_type == 'softmax_regression':
         net_conf = softmax_regression
diff --git a/09.gan/README.cn.md b/09.gan/README.cn.md
index 99cf01e..4c0fd22 100644
--- a/09.gan/README.cn.md
+++ b/09.gan/README.cn.md
@@ -265,16 +265,16 @@ dg_program = fluid.Program()

 # 定义判别真实图片的program
 with fluid.program_guard(d_program):
     # 输入图片大小为28*28=784
-    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 784], dtype='float32')
     # 标签shape=1
-    label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='float32')
     d_logit = D(img)
     d_loss = loss(d_logit, label)

 # 定义判别生成图片的program
 with fluid.program_guard(dg_program):
-    noise = fluid.layers.data(
-        name='noise', shape=[NOISE_SIZE], dtype='float32')
+    noise = fluid.data(
+        name='noise', shape=[None, NOISE_SIZE], dtype='float32')
     # 噪声数据作为输入得到生成图片
     g_img = G(x=noise)
diff --git a/09.gan/dc_gan.py b/09.gan/dc_gan.py
index c08752a..7234dc8 100644
--- a/09.gan/dc_gan.py
+++ b/09.gan/dc_gan.py
@@ -60,14 +60,14 @@ def train(args):
     dg_program = fluid.Program()

     with fluid.program_guard(d_program):
-        img = fluid.layers.data(name='img', shape=[784], dtype='float32')
-        label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+        img = fluid.data(name='img', shape=[None, 784], dtype='float32')
+        label = fluid.data(name='label', shape=[None, 1], dtype='float32')
         d_logit = D(img)
         d_loss = loss(d_logit, label)

     with fluid.program_guard(dg_program):
-        noise = fluid.layers.data(
-            name='noise', shape=[NOISE_SIZE], dtype='float32')
+        noise = fluid.data(
+            name='noise', shape=[None, NOISE_SIZE], dtype='float32')
         g_img = G(x=noise)

     g_program = dg_program.clone()
diff --git a/09.gan/index.cn.html b/09.gan/index.cn.html
index 8947f5a..a6b88e8 100644
--- a/09.gan/index.cn.html
+++ b/09.gan/index.cn.html
@@ -307,16 +307,16 @@ dg_program = fluid.Program()

 # 定义判别真实图片的program
 with fluid.program_guard(d_program):
     # 输入图片大小为28*28=784
-    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 784], dtype='float32')
     # 标签shape=1
-    label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='float32')
     d_logit = D(img)
     d_loss = loss(d_logit, label)

 # 定义判别生成图片的program
 with fluid.program_guard(dg_program):
-    noise = fluid.layers.data(
-        name='noise', shape=[NOISE_SIZE], dtype='float32')
+    noise = fluid.data(
+        name='noise', shape=[None, NOISE_SIZE], dtype='float32')
     # 噪声数据作为输入得到生成图片
     g_img = G(x=noise)
--
GitLab
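
For reference, the sketch below is not part of the patch; it only illustrates the two idioms this commit standardizes on, assuming a PaddlePaddle 1.6-era install where `paddle.fluid.data` is available: declaring the variable batch dimension with `None` instead of the `-1` sentinel, and using `fluid.data` (which takes the full shape explicitly) in place of `fluid.layers.data` (which prepends the batch dimension itself).

```python
# Minimal sketch of the declaration style this patch moves the book to.
# Assumes a PaddlePaddle 1.6-era install with paddle.fluid.data available.
import paddle.fluid as fluid

# Old style removed by the patch: fluid.layers.data prepends the batch
# dimension automatically, and -1 served as the "any size" placeholder.
# img = fluid.layers.data(name='img', shape=[784], dtype='float32')

# New style: fluid.data takes the complete shape, with None marking the
# dimension (here the batch axis) that is only known at run time.
img = fluid.data(name='img', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')

# Downstream layers consume the placeholders exactly as before.
prediction = fluid.layers.fc(input=img, size=10, act='softmax')
avg_loss = fluid.layers.mean(
    fluid.layers.cross_entropy(input=prediction, label=label))
```

The substitution is purely declarative: `None` reads as "any size" where `-1` looked like a concrete negative value, and the patch applies the same change uniformly across the READMEs, the rendered HTML pages, and the training scripts.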