Commit 54a9ba95 authored by ceci3

-1->None

Parent dc248340
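The change applied throughout this commit is consistent: the hard-coded `-1` batch dimension in `fluid.data` declarations becomes an explicit `None`, and the remaining `fluid.layers.data` calls are migrated to `fluid.data`. The sketch below illustrates the before/after forms; it is only an illustration, assuming the Paddle 1.x static-graph (fluid) API, with the variable names taken from the hunks that follow.

```python
import paddle.fluid as fluid

# Before: -1 marked the variable (batch) dimension in fluid.data.
# x = fluid.data(name='x', shape=[-1, 13], dtype='float32')

# After: None marks the variable batch dimension explicitly.
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')

# The older fluid.layers.data interface prepended the batch dimension
# itself, so its shape omitted it; fluid.data expects the full shape,
# batch dimension included. Hence shape=[784] becomes shape=[None, 784]
# in the GAN hunks below.
# img = fluid.layers.data(name='img', shape=[784], dtype='float32')
img = fluid.data(name='img', shape=[None, 784], dtype='float32')
```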
@@ -194,8 +194,8 @@ test_reader = paddle.batch(
 The aim of the training program is to define the network structure of the training model. For linear regression, it is a simple fully connected layer from input to output. More complex structures, such as convolutional neural networks and recurrent neural networks, will be introduced in later chapters. The training program must return the `mean loss` as its first return value, because it will be used by the backpropagation algorithm.
 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output
 main_program = fluid.default_main_program() # get the default/global main program
...
@@ -196,8 +196,8 @@ test_reader = paddle.batch(
 The aim of the training program is to define the network structure of the training model. For linear regression, it is a simple fully connected layer from input to output. More complex structures, such as convolutional neural networks and recurrent neural networks, will be introduced in later chapters. The training program must return the `mean error` as its first return value, because it will be used by backpropagation.
 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output
 main_program = fluid.default_main_program() # get the default/global main program
...
@@ -236,8 +236,8 @@ test_reader = paddle.batch(
 The aim of the training program is to define the network structure of the training model. For linear regression, it is a simple fully connected layer from input to output. More complex structures, such as convolutional neural networks and recurrent neural networks, will be introduced in later chapters. The training program must return the `mean loss` as its first return value, because it will be used by the backpropagation algorithm.
 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output
 main_program = fluid.default_main_program() # get the default/global main program
...
@@ -238,8 +238,8 @@ test_reader = paddle.batch(
 The aim of the training program is to define the network structure of the training model. For linear regression, it is a simple fully connected layer from input to output. More complex structures, such as convolutional neural networks and recurrent neural networks, will be introduced in later chapters. The training program must return the `mean error` as its first return value, because it will be used by backpropagation.
 ```python
-x = fluid.data(name='x', shape=[-1, 13], dtype='float32') # define shape and data type of input
-y = fluid.data(name='y', shape=[-1, 1], dtype='float32') # define shape and data type of output
+x = fluid.data(name='x', shape=[None, 13], dtype='float32') # define shape and data type of input
+y = fluid.data(name='y', shape=[None, 1], dtype='float32') # define shape and data type of output
 y_predict = fluid.layers.fc(input=x, size=1, act=None) # fully connected layer connecting input and output
 main_program = fluid.default_main_program() # get the default/global main program
...
@@ -87,8 +87,8 @@ def main():
         batch_size=batch_size)
     # feature vector of length 13
-    x = fluid.data(name='x', shape=[-1, 13], dtype='float32')
-    y = fluid.data(name='y', shape=[-1, 1], dtype='float32')
+    x = fluid.data(name='x', shape=[None, 13], dtype='float32')
+    y = fluid.data(name='y', shape=[None, 1], dtype='float32')
     main_program = fluid.default_main_program()
     startup_program = fluid.default_startup_program()
...
@@ -209,7 +209,7 @@ def softmax_regression():
     predict_image -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # fully connected layer with softmax as the activation function; the output size must be 10, the number of digits
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -229,7 +229,7 @@ def multilayer_perceptron():
     predict_image -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, with ReLU as the activation function
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, with ReLU as the activation function
@@ -251,7 +251,7 @@ def convolutional_neural_network():
     predict -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # use 20 5*5 filters, pooling size 2, pooling stride 2, and ReLU as the activation function
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -296,7 +296,7 @@ def train_program():
     """
     # label layer, named label, corresponding to the class label of the input image
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # predict = softmax_regression() # uncomment to use Softmax regression
     # predict = multilayer_perceptron() # uncomment to use the multilayer perceptron
...
@@ -188,7 +188,7 @@ def softmax_regression():
     predict_image -- result of classification
     """
     # input original image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # fully connected layer with softmax as the activation function; the size of the output layer must be 10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -208,7 +208,7 @@ def multilayer_perceptron():
     predict_image -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, whose activation function is ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, whose activation function is ReLU
@@ -230,7 +230,7 @@ def convolutional_neural_network():
     predict -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # use 20 5*5 filters; the pooling size is 2, the pooling stride is 2, and the activation function is ReLU
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -275,7 +275,7 @@ def train_program():
     """
     # label layer, named label, corresponding to the label category of the input picture
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # predict = softmax_regression() # uncomment to run Softmax regression
     # predict = multilayer_perceptron() # uncomment to run the multilayer perceptron
...
@@ -251,7 +251,7 @@ def softmax_regression():
     predict_image -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # fully connected layer with softmax as the activation function; the output size must be 10, the number of digits
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -271,7 +271,7 @@ def multilayer_perceptron():
     predict_image -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, with ReLU as the activation function
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, with ReLU as the activation function
@@ -293,7 +293,7 @@ def convolutional_neural_network():
     predict -- the result of classification
     """
     # input original image data of size 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # use 20 5*5 filters, pooling size 2, pooling stride 2, and ReLU as the activation function
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -338,7 +338,7 @@ def train_program():
     """
     # label layer, named label, corresponding to the class label of the input image
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # predict = softmax_regression() # uncomment to use Softmax regression
     # predict = multilayer_perceptron() # uncomment to use the multilayer perceptron
...
@@ -230,7 +230,7 @@ def softmax_regression():
     predict_image -- result of classification
     """
    # input original image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # fully connected layer with softmax as the activation function; the size of the output layer must be 10
     predict = fluid.layers.fc(
         input=img, size=10, act='softmax')
@@ -250,7 +250,7 @@ def multilayer_perceptron():
     predict_image -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first fully connected layer, whose activation function is ReLU
     hidden = fluid.layers.fc(input=img, size=200, act='relu')
     # the second fully connected layer, whose activation function is ReLU
@@ -272,7 +272,7 @@ def convolutional_neural_network():
     predict -- result of classification
     """
     # input raw image data in size of 28*28*1
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
     # the first convolution-pooling layer
     # use 20 5*5 filters; the pooling size is 2, the pooling stride is 2, and the activation function is ReLU
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
@@ -317,7 +317,7 @@ def train_program():
     """
     # label layer, named label, corresponding to the label category of the input picture
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # predict = softmax_regression() # uncomment to run Softmax regression
     # predict = multilayer_perceptron() # uncomment to run the multilayer perceptron
...
@@ -101,8 +101,8 @@ def train(nn_type,
     test_reader = paddle.batch(
         paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
-    img = fluid.data(name='img', shape=[-1, 1, 28, 28], dtype='float32')
-    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
+    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     if nn_type == 'softmax_regression':
         net_conf = softmax_regression
...
@@ -265,16 +265,16 @@ dg_program = fluid.Program()
 # define the program that discriminates real images
 with fluid.program_guard(d_program):
     # the input image size is 28*28=784
-    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 784], dtype='float32')
     # the label shape is 1
-    label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='float32')
     d_logit = D(img)
     d_loss = loss(d_logit, label)

 # define the program that discriminates generated images
 with fluid.program_guard(dg_program):
-    noise = fluid.layers.data(
-        name='noise', shape=[NOISE_SIZE], dtype='float32')
+    noise = fluid.data(
+        name='noise', shape=[None, NOISE_SIZE], dtype='float32')
     # use the noise data as input to generate images
     g_img = G(x=noise)
...
@@ -60,14 +60,14 @@ def train(args):
     dg_program = fluid.Program()

     with fluid.program_guard(d_program):
-        img = fluid.layers.data(name='img', shape=[784], dtype='float32')
-        label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+        img = fluid.data(name='img', shape=[None, 784], dtype='float32')
+        label = fluid.data(name='label', shape=[None, 1], dtype='float32')
         d_logit = D(img)
         d_loss = loss(d_logit, label)

     with fluid.program_guard(dg_program):
-        noise = fluid.layers.data(
-            name='noise', shape=[NOISE_SIZE], dtype='float32')
+        noise = fluid.data(
+            name='noise', shape=[None, NOISE_SIZE], dtype='float32')
         g_img = G(x=noise)

     g_program = dg_program.clone()
...
@@ -307,16 +307,16 @@ dg_program = fluid.Program()
 # define the program that discriminates real images
 with fluid.program_guard(d_program):
     # the input image size is 28*28=784
-    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
+    img = fluid.data(name='img', shape=[None, 784], dtype='float32')
     # the label shape is 1
-    label = fluid.layers.data(name='label', shape=[1], dtype='float32')
+    label = fluid.data(name='label', shape=[None, 1], dtype='float32')
     d_logit = D(img)
     d_loss = loss(d_logit, label)

 # define the program that discriminates generated images
 with fluid.program_guard(dg_program):
-    noise = fluid.layers.data(
-        name='noise', shape=[NOISE_SIZE], dtype='float32')
+    noise = fluid.data(
+        name='noise', shape=[None, NOISE_SIZE], dtype='float32')
     # use the noise data as input to generate images
     g_img = G(x=noise)
...
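With the batch dimension declared as `None`, the same program accepts feeds of any batch size at run time. A minimal sketch, assuming Paddle 1.6+ with the static-graph executor; the network is the one-layer regressor from the fit-a-line hunks above, and the batch sizes are arbitrary:

```python
import numpy as np
import paddle.fluid as fluid

# Declare the input with a variable (None) batch dimension, as in the commit.
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# Feed two different batch sizes against the same compiled program.
for batch_size in (4, 7):
    feed_x = np.random.rand(batch_size, 13).astype('float32')
    out, = exe.run(fluid.default_main_program(),
                   feed={'x': feed_x},
                   fetch_list=[y_predict])
    print(out.shape)  # (batch_size, 1)
```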