Commit a43862fe authored by zq19, committed by xsrobin

Release/1.5 synchronize api_en and api_cn (#1068)

* synchronize api_en and api_cn

Parent 832f9f09
@@ -183,7 +183,7 @@ GradientClipByValue
    w_param_attrs = fluid.ParamAttr(name=None,
        initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
        learning_rate=1.0,
-       regularizer=fluid.regualrizer.L1Decay(1.0),
+       regularizer=fluid.regularizer.L1Decay(1.0),
        trainable=True,
        gradient_clip=fluid.clip.GradientClipByValue(-1.0, 1.0))
    x = fluid.layers.data(name='x', shape=[10], dtype='float32')
...
@@ -22,7 +22,7 @@ DatasetFactory is a "factory" that creates a dataset according to its name; it can create
.. code-block:: python
    import paddle.fluid as fluid
-   dataset = paddle.fluid.DatasetFactory().create_dataset("InMemoryDataset")
+   dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
.. py:method:: create_dataset(datafeed_class='QueueDataset')
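For orientation, a minimal sketch (not part of this commit) of how the two dataset types documented here would be created through ``DatasetFactory``, assuming the Fluid 1.5 API shown in the hunk above:

.. code-block:: python

    import paddle.fluid as fluid

    # create_dataset() defaults to the "QueueDataset" feed class
    queue_dataset = fluid.DatasetFactory().create_dataset()
    # the in-memory variant is requested by name, as in the example above
    in_memory_dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")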
@@ -127,7 +127,7 @@ InMemoryDataset loads data into memory and buffers it before training. This class
    dataset.global_shuffle(fleet)
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
-   exe.train_from_dataset(fluid.default_main_program(), dataset)dataset.release_memory()
+   exe.train_from_dataset(fluid.default_main_program(), dataset)
    dataset.release_memory()
.. py:method:: get_memory_data_size(fleet=None)
...
@@ -1041,8 +1041,9 @@ DistributeTranspiler
    t = fluid.DistributeTranspiler(config=config)
    t.transpile(trainer_id=trainer_id, trainers=trainer_endpoints, current_endpoint="192.168.0.1:6174")
    exe = fluid.ParallelExecutor(
+       use_cuda=True,
        loss_name=avg_loss.name,
-       num_trainers=len(trainer_num,
+       num_trainers=trainer_num,
        trainer_id=trainer_id
    )
@@ -1273,11 +1274,11 @@ ExecutionStrategy
An int member. It specifies after how many program iterations the temporary variables produced during execution are cleared. Because the shapes of the temporary variables may stay the same across consecutive iterations, this can make the overall execution faster. The default value is 1.
.. note::
    1. If result data is fetched when calling the ``run`` method, ``ParallelExecutor`` clears the temporary variables at the end of the current iteration of the program
    2. In some NLP models this member can cause the GPU to run out of memory; in that case you should reduce the value of ``num_iteration_per_drop_scope``
.. py:attribute:: num_iteration_per_run
It configures how many iterations the executor runs each time the user calls pe.run() in a Python script.
.. py:attribute:: num_threads
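As a rough illustration of the attributes discussed in this hunk, a minimal sketch (not part of this commit) assuming the Fluid 1.5 ``ExecutionStrategy`` and ``ParallelExecutor`` API:

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[10], dtype='float32')
    hidden = fluid.layers.fc(input=x, size=10)
    avg_loss = fluid.layers.mean(hidden)

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 4
    # clear temporary variables every 10 iterations instead of every iteration;
    # reduce this value if GPU memory runs out (e.g. in some NLP models)
    exec_strategy.num_iteration_per_drop_scope = 10

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    parallel_exe = fluid.ParallelExecutor(use_cuda=False,
                                          loss_name=avg_loss.name,
                                          exec_strategy=exec_strategy)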
@@ -1508,7 +1509,7 @@ The documentation of infer_from_dataset is almost identical to that of train_from_dataset, except that in
    filelist = []  # you can set your own filelist, e.g. filelist = ["dataA.txt"]
    dataset.set_filelist(filelist)
    exe.run(fluid.default_startup_program())
-   exe.infer_from_dataset(program=fluid.default_main_program(),
+   exe.train_from_dataset(program=fluid.default_main_program(),
                           dataset=dataset)
@@ -2100,6 +2101,7 @@ ParallelExecutor
    loss = fluid.layers.mean(hidden)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+   exe = fluid.Executor(place)
    exe.run(startup_program)
    parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
@@ -2143,8 +2145,9 @@ ParamAttr
    w_param_attrs = fluid.ParamAttr(name="fc_weight",
        learning_rate=0.5,
-       regularizer=fluid.L2Decay(1.0),
+       regularizer=fluid.regularizer.L2Decay(1.0),
        trainable=True)
+   x = fluid.layers.data(name='X', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
...
@@ -227,7 +227,7 @@ NormalInitializer
    import paddle.fluid as fluid
    x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
    fc = fluid.layers.fc(input=x, size=10,
-       param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)
+       param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
.. _cn_api_fluid_initializer_NumpyArrayInitializer:
...
@@ -138,7 +138,7 @@ box_clip
        name='boxes', shape=[8, 4], dtype='float32', lod_level=1)
    im_info = fluid.layers.data(name='im_info', shape=[3])
    out = fluid.layers.box_clip(
-       input=boxes, im_info=im_info, inplace=True)
+       input=boxes, im_info=im_info)
@@ -286,7 +286,7 @@ the box decode process produces decode_box, and the assignment scheme is then as described below:
    pb = fluid.layers.data(
        name='prior_box', shape=[4], dtype='float32')
    pbv = fluid.layers.data(
-       name='prior_box_var', shape=[4], dtype='float32', append_batch_size=False))
+       name='prior_box_var', shape=[4], dtype='float32', append_batch_size=False)
    loc = fluid.layers.data(
        name='target_box', shape=[4*81], dtype='float32')
    scores = fluid.layers.data(
@@ -1288,14 +1288,16 @@ rpn_target_assign
.. code-block:: python
    import paddle.fluid as fluid
-   bbox_pred = fluid.layers.data(name=’bbox_pred’, shape=[100, 4],
-       append_batch_size=False, dtype=’float32’)
-   cls_logits = fluid.layers.data(name=’cls_logits’, shape=[100, 1],
-       append_batch_size=False, dtype=’float32’)
-   anchor_box = fluid.layers.data(name=’anchor_box’, shape=[20, 4],
-       append_batch_size=False, dtype=’float32’)
-   gt_boxes = fluid.layers.data(name=’gt_boxes’, shape=[10, 4],
-       append_batch_size=False, dtype=’float32’)
+   bbox_pred = fluid.layers.data(name='bbox_pred', shape=[100, 4],
+       append_batch_size=False, dtype='float32')
+   cls_logits = fluid.layers.data(name='cls_logits', shape=[100, 1],
+       append_batch_size=False, dtype='float32')
+   anchor_box = fluid.layers.data(name='anchor_box', shape=[20, 4],
+       append_batch_size=False, dtype='float32')
+   anchor_var = fluid.layers.data(name='anchor_var', shape=[20, 4],
+       append_batch_size=False, dtype='float32')
+   gt_boxes = fluid.layers.data(name='gt_boxes', shape=[10, 4],
+       append_batch_size=False, dtype='float32')
    is_crowd = fluid.layers.data(name='is_crowd', shape=[1],
        append_batch_size=False, dtype='float32')
    im_info = fluid.layers.data(name='im_infoss', shape=[1, 3],
@@ -1684,9 +1686,9 @@ yolov3_loss
    import paddle.fluid as fluid
    x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32')
-   gt_box = fluid.layers.data(name='gtbox', shape=[6, 4], dtype='float32')
-   gt_label = fluid.layers.data(name='gtlabel', shape=[6], dtype='int32')
-   gt_score = fluid.layers.data(name='gtscore', shape=[6], dtype='float32')
+   gt_box = fluid.layers.data(name='gt_box', shape=[6, 4], dtype='float32')
+   gt_label = fluid.layers.data(name='gt_label', shape=[6], dtype='int32')
+   gt_score = fluid.layers.data(name='gt_score', shape=[6], dtype='float32')
    anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
    anchor_mask = [0, 1, 2]
    loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
...
@@ -85,8 +85,8 @@ create_py_reader_by_data
        loss = fluid.layers.cross_entropy(input=predict, label=label)
        return fluid.layers.mean(loss)
-   image = fluid.layers.data(name='image', shape=[1, 28, 28], dtypes='float32')
-   label = fluid.layers.data(name='label', shape=[1], dtypes='int64')
+   image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
+   label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    reader = fluid.layers.create_py_reader_by_data(capacity=64,
        feed_list=[image, label])
    reader.decorate_paddle_reader(
...
@@ -85,7 +85,7 @@ auc
    data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
    label = fluid.layers.data(name="label", shape=[1], dtype="int32")
    predict = fluid.layers.fc(input=data, size=2)
-   auc_out=fluid.layers.auc(input=prediction, label=label)
+   auc_out=fluid.layers.auc(input=predict, label=label)
...
@@ -144,7 +144,7 @@ The pooling3d operation, according to the input ``input`` and the ``pool_size`` and ``pool_type`` parameters
    data = fluid.layers.data(
        name='data', shape=[3, 32, 32, 32], dtype='float32')
-   pool_out, mask = fluid.layers.adaptive_pool3d(
+   pool_out = fluid.layers.adaptive_pool3d(
        input=data,
        pool_size=[3, 3, 3],
        pool_type='avg')
@@ -520,7 +520,7 @@ beam_search
        name='probs', shape=[10000], dtype='float32')
    topk_scores, topk_indices = fluid.layers.topk(probs, k=beam_size)
    accu_scores = fluid.layers.elementwise_add(
-       x=fluid.layers.log(x=topk_scores)),
+       x=fluid.layers.log(x=topk_scores),
        y=fluid.layers.reshape(
            pre_scores, shape=[-1]),
        axis=0)
@@ -698,7 +698,7 @@ BRelu activation function
.. code-block:: python
    import paddle.fluid as fluid
-   x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype=”float32”)
+   x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
    y = fluid.layers.brelu(x, t_min=1.0, t_max=20.0)
@@ -1456,7 +1456,7 @@ crop
    ## or
    z = fluid.layers.data(name="z", shape=[3, 5], dtype="float32")
-   crop = fluid.layers.crop(z, shape=[2, 3])
+   crop = fluid.layers.crop(z, shape=[-1, 2, 3])
@@ -3481,7 +3481,7 @@ the gaussian_random operator.
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   out = fluid.layers.gaussian_random(shape=[20, 30])
+   out = layers.gaussian_random(shape=[20, 30])
@@ -3629,9 +3629,9 @@ step 2:
    import paddle.fluid as fluid
-   x = fluid.layers.data(name='x', shape=[3, 10, 32, 32], dtype='float32')
-   theta = fluid.layers.data(name='theta', shape=[3, 2, 3], dtype='float32')
-   grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]})
+   x = fluid.layers.data(name='x', shape=[10, 32, 32], dtype='float32')
+   theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
+   grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
    out = fluid.layers.grid_sampler(x=x, grid=grid)
@@ -4383,9 +4383,9 @@ label_smooth
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   label = fluid.layers.data(name="label", shape=[1], dtype="float32")
-   one_hot_label = fluid.layers.one_hot(input=label, depth=10)
-   smooth_label = fluid.layers.label_smooth(
+   label = layers.data(name="label", shape=[1], dtype="float32")
+   one_hot_label = layers.one_hot(input=label, depth=10)
+   smooth_label = layers.label_smooth(
        label=one_hot_label, epsilon=0.1, dtype="float32")
@@ -5043,7 +5043,7 @@ The sigmoid formula is :math:`sigmoid(x) = 1 / (1 + e^{-x})`.
    init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
    init_c = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
-   rnn_out, last_h, last_c = fluid.layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob)
+   rnn_out, last_h, last_c = layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob)
@@ -5118,8 +5118,8 @@ The inputs of the lstm unit include :math:`x_{t}`, :math:`h_{t-1}`, and :math:`c_{t-1}`
    pre_cell = fluid.layers.data(name='pre_cell', shape=[hidden_dim], dtype='float32')
    hidden = fluid.layers.lstm_unit(
        x_t=x,
-       hidden_t_prev=prev_hidden,
-       cell_t_prev=prev_cell)
+       hidden_t_prev=pre_hidden,
+       cell_t_prev=pre_cell)
@@ -5373,7 +5373,7 @@ mean_iou
    import paddle.fluid as fluid
    predict = fluid.layers.data(name='predict', shape=[3, 32, 32])
    label = fluid.layers.data(name='label', shape=[1])
-   iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes)
+   iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes=5)
@@ -7652,7 +7652,7 @@ sequence_expand
    x = fluid.layers.data(name='x', shape=[10], dtype='float32')
    y = fluid.layers.data(name='y', shape=[10, 20],
        dtype='float32', lod_level=1)
-   out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0)
+   out = layers.sequence_expand(x=x, y=y, ref_level=0)
@@ -8806,7 +8806,7 @@ SoftRelu activation function
    import paddle.fluid as fluid
-   x = fluid.layers.data(name=”x”, shape=[2,3,16,16], dtype=”float32”)
+   x = fluid.layers.data(name="x", shape=[3,16,16], dtype="float32")
    y = fluid.layers.soft_relu(x, threshold=20.0)
@@ -9078,7 +9078,7 @@ split
    # x1.shape [-1, 3, 3, 5]
    # x2.shape [-1, 3, 3, 5]
-   x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=2)
+   x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=2)
    # x0.shape [-1, 3, 2, 5]
    # x1.shape [-1, 3, 3, 5]
    # x2.shape [-1, 3, 4, 5]
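For reference, a short sketch (not part of the commit) of the two forms of ``num_or_sections`` accepted by ``fluid.layers.split``; the ``input`` tensor is assumed to have shape [-1, 3, 9, 5], which matches the shapes in the comments of the hunk above:

.. code-block:: python

    import paddle.fluid as fluid

    # assumed input with shape [-1, 3, 9, 5] (batch dimension is prepended by layers.data)
    input = fluid.layers.data(name='input', shape=[3, 9, 5], dtype='float32')

    # an int splits dim 2 into equal parts: each of x0, x1, x2 has shape [-1, 3, 3, 5]
    x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=2)

    # a list gives explicit section sizes: shapes [-1, 3, 2, 5], [-1, 3, 3, 5], [-1, 3, 4, 5]
    y0, y1, y2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=2)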
@@ -9180,8 +9180,8 @@ squeeze
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   x = fluid.layers.data(name='x', shape=[5, 1, 10])
-   y = fluid.layers.sequeeze(input=x, axes=[1])
+   x = layers.data(name='x', shape=[5, 1, 10])
+   y = layers.squeeze(input=x, axes=[1])
@@ -9331,9 +9331,9 @@ the sum operator.
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   input0 = fluid.layers.data(name="input0", shape=[13, 11], dtype='float32')
+   input0 = layers.data(name="input0", shape=[13, 11], dtype='float32')
    input1 = layers.data(name="input1", shape=[13, 11], dtype='float32')
-   out = fluid.layers.sum([input0,input1])
+   out = layers.sum([input0,input1])
@@ -9518,7 +9518,7 @@ topk
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    input = layers.data(name="input", shape=[13, 11], dtype='float32')
-   top5_values, top5_indices = fluid.layers.topk(input, k=5)
+   top5_values, top5_indices = layers.topk(input, k=5)
@@ -9651,8 +9651,8 @@ the uniform_random_batch_size_like operator.
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   input = fluid.layers.data(name="input", shape=[13, 11], dtype='float32')
-   out = fluid.layers.uniform_random_batch_size_like(input, [-1, 11])
+   input = layers.data(name="input", shape=[13, 11], dtype='float32')
+   out = layers.uniform_random_batch_size_like(input, [-1, 11])
@@ -9685,7 +9685,7 @@ unsqueeze
    import paddle.fluid as fluid
    x = fluid.layers.data(name='x', shape=[5, 10])
-   y = fluid.layers.unsequeeze(input=x, axes=[1])
+   y = fluid.layers.unsqueeze(input=x, axes=[1])
...
@@ -28,8 +28,8 @@ argmax
    import paddle.fluid as fluid
    x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
-   out = fluid.layers.argmax(x=in, axis=0)
-   out = fluid.layers.argmax(x=in, axis=-1)
+   out = fluid.layers.argmax(x, axis=0)
+   out = fluid.layers.argmax(x, axis=-1)
@@ -64,8 +64,8 @@ argmin
    import paddle.fluid as fluid
    x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
-   out = fluid.layers.argmin(x=in, axis=0)
-   out = fluid.layers.argmin(x=in, axis=-1)
+   out = fluid.layers.argmin(x, axis=0)
+   out = fluid.layers.argmin(x, axis=-1)
@@ -224,7 +224,7 @@ concat
    b = fluid.layers.data(name='b', shape=[2, 3], dtype='float32')
    c = fluid.layers.data(name='c', shape=[2, 2], dtype='float32')
    d = fluid.layers.data(name='d', shape=[2, 5], dtype='float32')
-   out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
+   out = fluid.layers.concat(input=[a, b, c, d], axis=2)
@@ -298,7 +298,7 @@ create_parameter
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
-   W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+   W = layers.create_parameter(shape=[784, 200], dtype='float32')
...
@@ -357,7 +357,6 @@ EditDistance
    seq_num_batch0 = batch_size
    distance_evaluator.update(edit_distances_batch0, seq_num_batch0)
-   distance, instance_error = distance_evaluator.eval()
    avg_distance, wrong_instance_ratio = distance_evaluator.eval()
    print("the average edit distance for batch0 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio))
    edit_distances_batch1 = np.random.randint(low = 0, high = 10, size = (batch_size, 1))
@@ -367,6 +366,8 @@ EditDistance
    avg_distance, wrong_instance_ratio = distance_evaluator.eval()
    print("the average edit distance for batch0 and batch1 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio))
+   distance_evaluator.reset()
.. py:method:: distance_evaluator.reset()
...