From 2131aacc84fe181ee65b8ee72ecf6a184259dc9a Mon Sep 17 00:00:00 2001
From: ruri
Date: Wed, 26 Feb 2020 19:24:32 +0800
Subject: [PATCH] update data api (#1811) (#1812)

update fluid.layers.data to fluid.data
---
 .../simple_case/image_classification/README.cn.md   |  8 ++++----
 .../simple_case/image_classification/README.md      | 10 +++++-----
 .../simple_case/image_classification/index.cn.html  |  8 ++++----
 .../simple_case/image_classification/index.html     | 10 +++++-----
 .../simple_case/image_classification/train.py       |  6 +++---
 5 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/doc/fluid/user_guides/simple_case/image_classification/README.cn.md b/doc/fluid/user_guides/simple_case/image_classification/README.cn.md
index ee7987c38..cdc22a93b 100644
--- a/doc/fluid/user_guides/simple_case/image_classification/README.cn.md
+++ b/doc/fluid/user_guides/simple_case/image_classification/README.cn.md
@@ -315,8 +315,8 @@ def resnet_cifar10(ipt, depth=32):
 ```python
 def inference_program():
     # The image is 32 * 32 with RGB representation.
-    data_shape = [3, 32, 32]
-    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+    data_shape = [None, 3, 32, 32]
+    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
 
     predict = resnet_cifar10(images, 32)
     # predict = vgg_bn_drop(images) # un-comment to use vgg net
@@ -327,7 +327,7 @@ def inference_program():
 然后我们需要设置训练程序 `train_program`。它首先从推理程序中进行预测。
 在训练期间,它将从预测中计算 `avg_cost`。
 
-在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.layers.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。
+在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。
 
 **注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。
 
@@ -335,7 +335,7 @@
 def train_program():
     predict = inference_program()
 
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
diff --git a/doc/fluid/user_guides/simple_case/image_classification/README.md b/doc/fluid/user_guides/simple_case/image_classification/README.md
index 310733c1c..6471b3398 100644
--- a/doc/fluid/user_guides/simple_case/image_classification/README.md
+++ b/doc/fluid/user_guides/simple_case/image_classification/README.md
@@ -309,13 +309,13 @@ def resnet_cifar10(ipt, depth=32):
 
 ## Inference Program Configuration
 
-The input to the network is defined as `fluid.layers.data` , corresponding to image pixels in the context of image classification. The images in CIFAR10 are 32x32 coloured images with three channels. Therefore, the size of the input data is 3072 (3x32x32).
+The input to the network is defined as `fluid.data` , corresponding to image pixels in the context of image classification. The images in CIFAR10 are 32x32 coloured images with three channels. Therefore, the size of the input data is 3072 (3x32x32).
 
 ```python
 def inference_program():
     # The image is 32 * 32 with RGB representation.
-    data_shape = [3, 32, 32]
-    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+    data_shape = [None, 3, 32, 32]
+    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
 
     predict = resnet_cifar10(images, 32)
     # predict = vgg_bn_drop(images) # un-comment to use vgg net
@@ -326,7 +326,7 @@ def inference_program():
 Then we need to set up the `train_program`.
 It takes the prediction from the inference_program first. During the training, it will calculate the `avg_cost` from the prediction.
 
-In the context of supervised learning, labels of training images are defined in `fluid.layers.data` as well. During training, the multi-class cross-entropy is used as the loss function and becomes the output of the network. During testing, the outputs are the probabilities calculated in the classifier.
+In the context of supervised learning, labels of training images are defined in `fluid.data` as well. During training, the multi-class cross-entropy is used as the loss function and becomes the output of the network. During testing, the outputs are the probabilities calculated in the classifier.
 
 **NOTE:** A training program should return an array and the first returned argument has to be `avg_cost` .
 The trainer always uses it to calculate the gradients.
@@ -335,7 +335,7 @@
 def train_program():
     predict = inference_program()
 
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
diff --git a/doc/fluid/user_guides/simple_case/image_classification/index.cn.html b/doc/fluid/user_guides/simple_case/image_classification/index.cn.html
index ffbf605eb..6c53ac545 100644
--- a/doc/fluid/user_guides/simple_case/image_classification/index.cn.html
+++ b/doc/fluid/user_guides/simple_case/image_classification/index.cn.html
@@ -357,8 +357,8 @@ def resnet_cifar10(ipt, depth=32):
 ```python
 def inference_program():
     # The image is 32 * 32 with RGB representation.
-    data_shape = [3, 32, 32]
-    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+    data_shape = [None, 3, 32, 32]
+    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
 
     predict = resnet_cifar10(images, 32)
     # predict = vgg_bn_drop(images) # un-comment to use vgg net
@@ -369,7 +369,7 @@ def inference_program():
 然后我们需要设置训练程序 `train_program`。它首先从推理程序中进行预测。
 在训练期间,它将从预测中计算 `avg_cost`。
 
-在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.layers.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。
+在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。
 
 **注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。
 
@@ -377,7 +377,7 @@
 def train_program():
     predict = inference_program()
 
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
diff --git a/doc/fluid/user_guides/simple_case/image_classification/index.html b/doc/fluid/user_guides/simple_case/image_classification/index.html
index aae792c4d..66c1a657e 100644
--- a/doc/fluid/user_guides/simple_case/image_classification/index.html
+++ b/doc/fluid/user_guides/simple_case/image_classification/index.html
@@ -351,13 +351,13 @@ def resnet_cifar10(ipt, depth=32):
 
 ## Inference Program Configuration
 
-The input to the network is defined as `fluid.layers.data` , corresponding to image pixels in the context of image classification. The images in CIFAR10 are 32x32 coloured images with three channels. Therefore, the size of the input data is 3072 (3x32x32).
+The input to the network is defined as `fluid.data` , corresponding to image pixels in the context of image classification. The images in CIFAR10 are 32x32 coloured images with three channels. Therefore, the size of the input data is 3072 (3x32x32).
 
 ```python
 def inference_program():
     # The image is 32 * 32 with RGB representation.
-    data_shape = [3, 32, 32]
-    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+    data_shape = [None, 3, 32, 32]
+    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
 
     predict = resnet_cifar10(images, 32)
     # predict = vgg_bn_drop(images) # un-comment to use vgg net
@@ -368,7 +368,7 @@ def inference_program():
 Then we need to set up the `train_program`.
 It takes the prediction from the inference_program first. During the training, it will calculate the `avg_cost` from the prediction.
 
-In the context of supervised learning, labels of training images are defined in `fluid.layers.data` as well. During training, the multi-class cross-entropy is used as the loss function and becomes the output of the network. During testing, the outputs are the probabilities calculated in the classifier.
+In the context of supervised learning, labels of training images are defined in `fluid.data` as well. During training, the multi-class cross-entropy is used as the loss function and becomes the output of the network. During testing, the outputs are the probabilities calculated in the classifier.
 
 **NOTE:** A training program should return an array and the first returned argument has to be `avg_cost` .
 The trainer always uses it to calculate the gradients.
@@ -377,7 +377,7 @@
 def train_program():
     predict = inference_program()
 
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
diff --git a/doc/fluid/user_guides/simple_case/image_classification/train.py b/doc/fluid/user_guides/simple_case/image_classification/train.py
index 28bbceb36..094791808 100644
--- a/doc/fluid/user_guides/simple_case/image_classification/train.py
+++ b/doc/fluid/user_guides/simple_case/image_classification/train.py
@@ -40,8 +40,8 @@ def parse_args():
 
 def inference_network():
     # The image is 32 * 32 with RGB representation.
-    data_shape = [3, 32, 32]
-    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+    data_shape = [None, 3, 32, 32]
+    images = fluid.data(name='pixel', shape=data_shape, dtype='float32')
 
     predict = resnet_cifar10(images, 32)
     # predict = vgg_bn_drop(images) # un-comment to use vgg net
@@ -49,7 +49,7 @@ def inference_network():
 
 
 def train_network(predict):
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
--
GitLab
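As a quick reference for applying the same migration to other scripts, below is a minimal sketch of the input-declaration change this patch makes throughout the tutorial. It is an illustration, not part of the patch, and assumes a PaddlePaddle 1.x release in which both `fluid.layers.data` and `fluid.data` are available.

```python
import paddle.fluid as fluid

# Old API: fluid.layers.data takes only the per-sample shape and
# implicitly prepends the batch dimension.
# images = fluid.layers.data(name='pixel', shape=[3, 32, 32], dtype='float32')

# New API: fluid.data takes the full shape, with None marking the
# variable batch dimension, matching the shapes used in this patch.
images = fluid.data(name='pixel', shape=[None, 3, 32, 32], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
```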