提交 a3783442 编写于 作者:zq19 提交者:xsrobin

Synchronize api_en and api_cn. (#1067)

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn

* synchronize api_en and api_cn
上级 ef45bcad
......@@ -26,7 +26,7 @@ GradientClipByValue
w_param_attrs = fluid.ParamAttr(name=None,
initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
learning_rate=1.0,
regularizer=fluid.regualrizer.L1Decay(1.0),
regularizer=fluid.regularizer.L1Decay(1.0),
trainable=True,
gradient_clip=fluid.clip.GradientClipByValue(-1.0, 1.0))
x = fluid.layers.data(name='x', shape=[10], dtype='float32')
......
......@@ -13,7 +13,7 @@ DatasetFactory是一个按数据集名称创建数据集的 "工厂",可以创
.. code-block:: python
import paddle.fluid as fluid
dataset = paddle.fluid.DatasetFactory().create_dataset("InMemoryDataset")
dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
.. py:method:: create_dataset(datafeed_class='QueueDataset')
......
......@@ -84,7 +84,7 @@ InMemoryDataset会向内存中加载数据并在训练前缓冲数据。此类
dataset.global_shuffle(fleet)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
exe.train_from_dataset(fluid.default_main_program(), dataset)dataset.release_memory()
exe.train_from_dataset(fluid.default_main_program(), dataset)
dataset.release_memory()
.. py:method:: get_memory_data_size(fleet=None)
......
......@@ -211,7 +211,7 @@ infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在
filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
......@@ -78,7 +78,7 @@ reader通常返回一个minibatch条目列表。在列表中每一条目都是
for data in reader():
outs = exe.run(program=main_program,
feed=feeder.feed(data),
fetch_list=[out]))
fetch_list=[out])
.. py:method:: feed(iterable)
......
......@@ -190,6 +190,7 @@ ParallelExecutor
loss = fluid.layers.mean(hidden)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
......
......@@ -26,8 +26,9 @@ ParamAttr
w_param_attrs = fluid.ParamAttr(name="fc_weight",
learning_rate=0.5,
regularizer=fluid.L2Decay(1.0),
regularizer=fluid.regularizer.L2Decay(1.0),
trainable=True)
x = fluid.layers.data(name='X', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
......
......@@ -19,6 +19,6 @@ NormalInitializer
import paddle.fluid as fluid
x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)
param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
......@@ -71,7 +71,7 @@ pooling3d操作根据输入 ``input`` ,``pool_size`` , ``pool_type`` 参数
data = fluid.layers.data(
name='data', shape=[3, 32, 32, 32], dtype='float32')
pool_out, mask = fluid.layers.adaptive_pool3d(
pool_out = fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='avg')
......
......@@ -23,8 +23,8 @@ argmax
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
out = fluid.layers.argmax(x=in, axis=0)
out = fluid.layers.argmax(x=in, axis=-1)
out = fluid.layers.argmax(x, axis=0)
out = fluid.layers.argmax(x, axis=-1)
......
......@@ -23,8 +23,8 @@ argmin
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
out = fluid.layers.argmin(x=in, axis=0)
out = fluid.layers.argmin(x=in, axis=-1)
out = fluid.layers.argmin(x, axis=0)
out = fluid.layers.argmin(x, axis=-1)
......
......@@ -37,7 +37,7 @@ auc
data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int32")
predict = fluid.layers.fc(input=data, size=2)
auc_out=fluid.layers.auc(input=prediction, label=label)
auc_out=fluid.layers.auc(input=predict, label=label)
......
......@@ -54,7 +54,7 @@ beam_search
name='probs', shape=[10000], dtype='float32')
topk_scores, topk_indices = fluid.layers.topk(probs, k=beam_size)
accu_scores = fluid.layers.elementwise_add(
x=fluid.layers.log(x=topk_scores)),
x=fluid.layers.log(x=topk_scores),
y=fluid.layers.reshape(
pre_scores, shape=[-1]),
axis=0)
......
......@@ -41,7 +41,7 @@ box_clip
name='boxes', shape=[8, 4], dtype='float32', lod_level=1)
im_info = fluid.layers.data(name='im_info', shape=[3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info, inplace=True)
input=boxes, im_info=im_info)
......
......@@ -22,7 +22,7 @@ BRelu 激活函数
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype=”float32”)
x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
y = fluid.layers.brelu(x, t_min=1.0, t_max=20.0)
......
......@@ -27,7 +27,7 @@ concat
b = fluid.layers.data(name='b', shape=[2, 3], dtype='float32')
c = fluid.layers.data(name='c', shape=[2, 2], dtype='float32')
d = fluid.layers.data(name='d', shape=[2, 5], dtype='float32')
out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
out = fluid.layers.concat(input=[a, b, c, d], axis=2)
......
......@@ -24,7 +24,7 @@ create_parameter
import paddle.fluid as fluid
import paddle.fluid.layers as layers
W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
W = layers.create_parameter(shape=[784, 200], dtype='float32')
......
......@@ -33,8 +33,8 @@ create_py_reader_by_data
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtypes='float32')
label = fluid.layers.data(name='label', shape=[1], dtypes='int64')
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
......
......@@ -63,7 +63,7 @@ crop
## or
z = fluid.layers.data(name="z", shape=[3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[2, 3])
crop = fluid.layers.crop(z, shape=[-1, 2, 3])
......
......@@ -26,7 +26,7 @@ gaussian_random算子。
import paddle.fluid as fluid
import paddle.fluid.layers as layers
out = fluid.layers.gaussian_random(shape=[20, 30])
out = layers.gaussian_random(shape=[20, 30])
......
......@@ -63,9 +63,9 @@ step 2:
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[3, 10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[3, 2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]})
x = fluid.layers.data(name='x', shape=[10, 32, 32], dtype='float32')
theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
out = fluid.layers.grid_sampler(x=x, grid=grid)
......
......@@ -38,7 +38,7 @@ inverse_time_decay
decay_steps=10000,
decay_rate=0.5,
staircase=True))
sgd_optimizer.minimize(avg_cost)
......
......@@ -37,9 +37,9 @@ label_smooth
import paddle.fluid as fluid
import paddle.fluid.layers as layers
label = fluid.layers.data(name="label", shape=[1], dtype="float32")
one_hot_label = fluid.layers.one_hot(input=label, depth=10)
smooth_label = fluid.layers.label_smooth(
label = layers.data(name="label", shape=[1], dtype="float32")
one_hot_label = layers.one_hot(input=label, depth=10)
smooth_label = layers.label_smooth(
label=one_hot_label, epsilon=0.1, dtype="float32")
......
......@@ -71,7 +71,7 @@ sigmoid的计算公式为: :math:`sigmoid(x) = 1 / (1 + e^{-x})` 。
init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
init_c = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
rnn_out, last_h, last_c = fluid.layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob)
rnn_out, last_h, last_c = layers.lstm(emb, init_h, init_c, max_len, hidden_size, num_layers, dropout_prob=dropout_prob)
......
......@@ -60,8 +60,8 @@ lstm单元的输入包括 :math:`x_{t}` , :math:`h_{t-1}` 和 :math:`c_{t-1}`
pre_cell = fluid.layers.data(name='pre_cell', shape=[hidden_dim], dtype='float32')
hidden = fluid.layers.lstm_unit(
x_t=x,
hidden_t_prev=prev_hidden,
cell_t_prev=prev_cell)
hidden_t_prev=pre_hidden,
cell_t_prev=pre_cell)
......
......@@ -33,7 +33,7 @@ mean_iou
import paddle.fluid as fluid
predict = fluid.layers.data(name='predict', shape=[3, 32, 32])
label = fluid.layers.data(name='label', shape=[1])
iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes)
iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes=5)
......
......@@ -50,7 +50,7 @@ py_reader
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
......
......@@ -45,14 +45,16 @@ rpn_target_assign
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.layers.data(name=’bbox_pred’, shape=[100, 4],
append_batch_size=False, dtype=’float32’)
cls_logits = fluid.layers.data(name=’cls_logits’, shape=[100, 1],
append_batch_size=False, dtype=’float32’)
anchor_box = fluid.layers.data(name=’anchor_box’, shape=[20, 4],
append_batch_size=False, dtype=’float32’)
gt_boxes = fluid.layers.data(name=’gt_boxes’, shape=[10, 4],
append_batch_size=False, dtype=’float32’)
bbox_pred = fluid.layers.data(name='bbox_pred', shape=[100, 4],
append_batch_size=False, dtype='float32')
cls_logits = fluid.layers.data(name='cls_logits', shape=[100, 1],
append_batch_size=False, dtype='float32')
anchor_box = fluid.layers.data(name='anchor_box', shape=[20, 4],
append_batch_size=False, dtype='float32')
anchor_var = fluid.layers.data(name='anchor_var', shape=[20, 4],
append_batch_size=False, dtype='float32')
gt_boxes = fluid.layers.data(name='gt_boxes', shape=[10, 4],
append_batch_size=False, dtype='float32')
is_crowd = fluid.layers.data(name='is_crowd', shape=[1],
append_batch_size=False, dtype='float32')
im_info = fluid.layers.data(name='im_infoss', shape=[1, 3],
......
......@@ -62,7 +62,7 @@ sequence_expand
x = fluid.layers.data(name='x', shape=[10], dtype='float32')
y = fluid.layers.data(name='y', shape=[10, 20],
dtype='float32', lod_level=1)
out = fluid.layers.sequence_expand(x=x, y=y, ref_level=0)
out = layers.sequence_expand(x=x, y=y, ref_level=0)
......
......@@ -20,7 +20,7 @@ SoftRelu 激活函数
import paddle.fluid as fluid
x = fluid.layers.data(name=”x”, shape=[2,3,16,16], dtype=”float32”)
x = fluid.layers.data(name="x", shape=[3,16,16], dtype="float32")
y = fluid.layers.soft_relu(x, threshold=20.0)
......
......@@ -32,7 +32,7 @@ split
# x1.shape [-1, 3, 3, 5]
# x2.shape [-1, 3, 3, 5]
x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=2)
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=2)
# x0.shape [-1, 3, 2, 5]
# x1.shape [-1, 3, 3, 5]
# x2.shape [-1, 3, 4, 5]
......
......@@ -40,8 +40,8 @@ squeeze
import paddle.fluid as fluid
import paddle.fluid.layers as layers
x = fluid.layers.data(name='x', shape=[5, 1, 10])
y = fluid.layers.sequeeze(input=x, axes=[1])
x = layers.data(name='x', shape=[5, 1, 10])
y = layers.squeeze(input=x, axes=[1])
......
......@@ -23,9 +23,9 @@ sum算子。
import paddle.fluid as fluid
import paddle.fluid.layers as layers
input0 = fluid.layers.data(name="input0", shape=[13, 11], dtype='float32')
input0 = layers.data(name="input0", shape=[13, 11], dtype='float32')
input1 = layers.data(name="input1", shape=[13, 11], dtype='float32')
out = fluid.layers.sum([input0,input1])
out = layers.sum([input0,input1])
......
......@@ -49,7 +49,7 @@ topk
import paddle.fluid as fluid
import paddle.fluid.layers as layers
input = layers.data(name="input", shape=[13, 11], dtype='float32')
top5_values, top5_indices = fluid.layers.topk(input, k=5)
top5_values, top5_indices = layers.topk(input, k=5)
......
......@@ -31,8 +31,8 @@ uniform_random_batch_size_like算子。
import paddle.fluid as fluid
import paddle.fluid.layers as layers
input = fluid.layers.data(name="input", shape=[13, 11], dtype='float32')
out = fluid.layers.uniform_random_batch_size_like(input, [-1, 11])
input = layers.data(name="input", shape=[13, 11], dtype='float32')
out = layers.uniform_random_batch_size_like(input, [-1, 11])
......
......@@ -25,7 +25,7 @@ unsqueeze
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[5, 10])
y = fluid.layers.unsequeeze(input=x, axes=[1])
y = fluid.layers.unsqueeze(input=x, axes=[1])
......
......@@ -86,9 +86,9 @@ yolov3_loss
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32')
gt_box = fluid.layers.data(name='gtbox', shape=[6, 4], dtype='float32')
gt_label = fluid.layers.data(name='gtlabel', shape=[6], dtype='int32')
gt_score = fluid.layers.data(name='gtscore', shape=[6], dtype='float32')
gt_box = fluid.layers.data(name='gt_box', shape=[6, 4], dtype='float32')
gt_label = fluid.layers.data(name='gt_label', shape=[6], dtype='int32')
gt_score = fluid.layers.data(name='gt_score', shape=[6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
......
......@@ -31,7 +31,6 @@ EditDistance
seq_num_batch0 = batch_size
distance_evaluator.update(edit_distances_batch0, seq_num_batch0)
distance, instance_error = distance_evaluator.eval()
avg_distance, wrong_instance_ratio = distance_evaluator.eval()
print("the average edit distance for batch0 is %.2f and the wrong instance ratio is %.2f " % (avg_distance, wrong_instance_ratio))
edit_distances_batch1 = np.random.randint(low = 0, high = 10, size = (batch_size, 1))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册