From b86d7f219ffeedd62f5755f11799dad75c443abc Mon Sep 17 00:00:00 2001 From: Travis CI Date: Sun, 25 Feb 2018 02:43:28 +0000 Subject: [PATCH] Deploy to GitHub Pages: 95ea54fd7938b59483e81a57b69262e7efa410cd --- .../_sources/v2/fluid/data_feeder.rst.txt | 2 +- .../_sources/v2/fluid/evaluator.rst.txt | 4 +- .../_sources/v2/fluid/executor.rst.txt | 8 +- .../_sources/v2/fluid/initializer.rst.txt | 8 +- develop/api_doc/_sources/v2/fluid/io.rst.txt | 18 +- .../api_doc/_sources/v2/fluid/layers.rst.txt | 256 +++++++++--------- .../api_doc/_sources/v2/fluid/nets.rst.txt | 8 +- .../_sources/v2/fluid/optimizer.rst.txt | 12 +- .../_sources/v2/fluid/param_attr.rst.txt | 4 +- .../_sources/v2/fluid/profiler.rst.txt | 6 +- .../_sources/v2/fluid/regularizer.rst.txt | 6 +- develop/api_doc/v2/fluid/data_feeder.html | 2 +- develop/api_doc/v2/fluid/evaluator.html | 4 +- develop/api_doc/v2/fluid/executor.html | 8 +- develop/api_doc/v2/fluid/initializer.html | 8 +- develop/api_doc/v2/fluid/io.html | 18 +- develop/api_doc/v2/fluid/layers.html | 256 +++++++++--------- develop/api_doc/v2/fluid/nets.html | 8 +- develop/api_doc/v2/fluid/optimizer.html | 12 +- develop/api_doc/v2/fluid/param_attr.html | 4 +- develop/api_doc/v2/fluid/profiler.html | 6 +- develop/api_doc/v2/fluid/regularizer.html | 6 +- 22 files changed, 332 insertions(+), 332 deletions(-) diff --git a/develop/api_doc/_sources/v2/fluid/data_feeder.rst.txt b/develop/api_doc/_sources/v2/fluid/data_feeder.rst.txt index a591c7334fd..3df5c0307ff 100644 --- a/develop/api_doc/_sources/v2/fluid/data_feeder.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/data_feeder.rst.txt @@ -8,7 +8,7 @@ data_feeder DataFeeder ---------- -.. autoclass:: paddle.v2.fluid.data_feeder.DataFeeder +.. 
autoclass:: paddle.fluid.data_feeder.DataFeeder :members: :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/evaluator.rst.txt b/develop/api_doc/_sources/v2/fluid/evaluator.rst.txt index 00dcecfd628..ae9daeb7918 100644 --- a/develop/api_doc/_sources/v2/fluid/evaluator.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/evaluator.rst.txt @@ -8,14 +8,14 @@ evaluator Accuracy -------- -.. autoclass:: paddle.v2.fluid.evaluator.Accuracy +.. autoclass:: paddle.fluid.evaluator.Accuracy :members: :noindex: ChunkEvaluator -------------- -.. autoclass:: paddle.v2.fluid.evaluator.ChunkEvaluator +.. autoclass:: paddle.fluid.evaluator.ChunkEvaluator :members: :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/executor.rst.txt b/develop/api_doc/_sources/v2/fluid/executor.rst.txt index a028f6283f2..a9cdf264e49 100644 --- a/develop/api_doc/_sources/v2/fluid/executor.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/executor.rst.txt @@ -8,25 +8,25 @@ executor Executor -------- -.. autoclass:: paddle.v2.fluid.executor.Executor +.. autoclass:: paddle.fluid.executor.Executor :members: :noindex: global_scope ------------ -.. autofunction:: paddle.v2.fluid.executor.global_scope +.. autofunction:: paddle.fluid.executor.global_scope :noindex: scope_guard ----------- -.. autofunction:: paddle.v2.fluid.executor.scope_guard +.. autofunction:: paddle.fluid.executor.scope_guard :noindex: switch_scope ------------ -.. autofunction:: paddle.v2.fluid.executor.switch_scope +.. autofunction:: paddle.fluid.executor.switch_scope :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/initializer.rst.txt b/develop/api_doc/_sources/v2/fluid/initializer.rst.txt index c38be033fff..ee69925fda6 100644 --- a/develop/api_doc/_sources/v2/fluid/initializer.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/initializer.rst.txt @@ -8,28 +8,28 @@ initializer Constant -------- -.. autoclass:: paddle.v2.fluid.initializer.Constant +.. 
autoclass:: paddle.fluid.initializer.Constant :members: :noindex: Uniform ------- -.. autoclass:: paddle.v2.fluid.initializer.Uniform +.. autoclass:: paddle.fluid.initializer.Uniform :members: :noindex: Normal ------ -.. autoclass:: paddle.v2.fluid.initializer.Normal +.. autoclass:: paddle.fluid.initializer.Normal :members: :noindex: Xavier ------ -.. autoclass:: paddle.v2.fluid.initializer.Xavier +.. autoclass:: paddle.fluid.initializer.Xavier :members: :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/io.rst.txt b/develop/api_doc/_sources/v2/fluid/io.rst.txt index 37c9c273e36..dd9d88b6699 100644 --- a/develop/api_doc/_sources/v2/fluid/io.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/io.rst.txt @@ -8,54 +8,54 @@ io save_vars --------- -.. autofunction:: paddle.v2.fluid.io.save_vars +.. autofunction:: paddle.fluid.io.save_vars :noindex: save_params ----------- -.. autofunction:: paddle.v2.fluid.io.save_params +.. autofunction:: paddle.fluid.io.save_params :noindex: save_persistables ----------------- -.. autofunction:: paddle.v2.fluid.io.save_persistables +.. autofunction:: paddle.fluid.io.save_persistables :noindex: load_vars --------- -.. autofunction:: paddle.v2.fluid.io.load_vars +.. autofunction:: paddle.fluid.io.load_vars :noindex: load_params ----------- -.. autofunction:: paddle.v2.fluid.io.load_params +.. autofunction:: paddle.fluid.io.load_params :noindex: load_persistables ----------------- -.. autofunction:: paddle.v2.fluid.io.load_persistables +.. autofunction:: paddle.fluid.io.load_persistables :noindex: save_inference_model -------------------- -.. autofunction:: paddle.v2.fluid.io.save_inference_model +.. autofunction:: paddle.fluid.io.save_inference_model :noindex: load_inference_model -------------------- -.. autofunction:: paddle.v2.fluid.io.load_inference_model +.. autofunction:: paddle.fluid.io.load_inference_model :noindex: get_inference_program --------------------- -.. autofunction:: paddle.v2.fluid.io.get_inference_program +.. 
autofunction:: paddle.fluid.io.get_inference_program :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/layers.rst.txt b/develop/api_doc/_sources/v2/fluid/layers.rst.txt index 58c493fd741..ae35d8c5347 100644 --- a/develop/api_doc/_sources/v2/fluid/layers.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/layers.rst.txt @@ -11,167 +11,167 @@ control_flow split_lod_tensor ---------------- -.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor +.. autofunction:: paddle.fluid.layers.split_lod_tensor :noindex: merge_lod_tensor ---------------- -.. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor +.. autofunction:: paddle.fluid.layers.merge_lod_tensor :noindex: BlockGuard ---------- -.. autoclass:: paddle.v2.fluid.layers.BlockGuard +.. autoclass:: paddle.fluid.layers.BlockGuard :members: :noindex: BlockGuardWithCompletion ------------------------ -.. autoclass:: paddle.v2.fluid.layers.BlockGuardWithCompletion +.. autoclass:: paddle.fluid.layers.BlockGuardWithCompletion :members: :noindex: StaticRNNMemoryLink ------------------- -.. autoclass:: paddle.v2.fluid.layers.StaticRNNMemoryLink +.. autoclass:: paddle.fluid.layers.StaticRNNMemoryLink :members: :noindex: WhileGuard ---------- -.. autoclass:: paddle.v2.fluid.layers.WhileGuard +.. autoclass:: paddle.fluid.layers.WhileGuard :members: :noindex: While ----- -.. autoclass:: paddle.v2.fluid.layers.While +.. autoclass:: paddle.fluid.layers.While :members: :noindex: lod_rank_table -------------- -.. autofunction:: paddle.v2.fluid.layers.lod_rank_table +.. autofunction:: paddle.fluid.layers.lod_rank_table :noindex: max_sequence_len ---------------- -.. autofunction:: paddle.v2.fluid.layers.max_sequence_len +.. autofunction:: paddle.fluid.layers.max_sequence_len :noindex: topk ---- -.. autofunction:: paddle.v2.fluid.layers.topk +.. autofunction:: paddle.fluid.layers.topk :noindex: lod_tensor_to_array ------------------- -.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array +.. 
autofunction:: paddle.fluid.layers.lod_tensor_to_array :noindex: array_to_lod_tensor ------------------- -.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor +.. autofunction:: paddle.fluid.layers.array_to_lod_tensor :noindex: increment --------- -.. autofunction:: paddle.v2.fluid.layers.increment +.. autofunction:: paddle.fluid.layers.increment :noindex: array_write ----------- -.. autofunction:: paddle.v2.fluid.layers.array_write +.. autofunction:: paddle.fluid.layers.array_write :noindex: create_array ------------ -.. autofunction:: paddle.v2.fluid.layers.create_array +.. autofunction:: paddle.fluid.layers.create_array :noindex: less_than --------- -.. autofunction:: paddle.v2.fluid.layers.less_than +.. autofunction:: paddle.fluid.layers.less_than :noindex: array_read ---------- -.. autofunction:: paddle.v2.fluid.layers.array_read +.. autofunction:: paddle.fluid.layers.array_read :noindex: shrink_memory ------------- -.. autofunction:: paddle.v2.fluid.layers.shrink_memory +.. autofunction:: paddle.fluid.layers.shrink_memory :noindex: array_length ------------ -.. autofunction:: paddle.v2.fluid.layers.array_length +.. autofunction:: paddle.fluid.layers.array_length :noindex: IfElse ------ -.. autoclass:: paddle.v2.fluid.layers.IfElse +.. autoclass:: paddle.fluid.layers.IfElse :members: :noindex: DynamicRNN ---------- -.. autoclass:: paddle.v2.fluid.layers.DynamicRNN +.. autoclass:: paddle.fluid.layers.DynamicRNN :members: :noindex: ConditionalBlock ---------------- -.. autoclass:: paddle.v2.fluid.layers.ConditionalBlock +.. autoclass:: paddle.fluid.layers.ConditionalBlock :members: :noindex: StaticRNN --------- -.. autoclass:: paddle.v2.fluid.layers.StaticRNN +.. autoclass:: paddle.fluid.layers.StaticRNN :members: :noindex: reorder_lod_tensor_by_rank -------------------------- -.. autofunction:: paddle.v2.fluid.layers.reorder_lod_tensor_by_rank +.. autofunction:: paddle.fluid.layers.reorder_lod_tensor_by_rank :noindex: ParallelDo ---------- -.. 
autoclass:: paddle.v2.fluid.layers.ParallelDo +.. autoclass:: paddle.fluid.layers.ParallelDo :members: :noindex: Print ----- -.. autofunction:: paddle.v2.fluid.layers.Print +.. autofunction:: paddle.fluid.layers.Print :noindex: device @@ -180,7 +180,7 @@ device get_places ---------- -.. autofunction:: paddle.v2.fluid.layers.get_places +.. autofunction:: paddle.fluid.layers.get_places :noindex: io @@ -189,27 +189,27 @@ io data ---- -.. autofunction:: paddle.v2.fluid.layers.data +.. autofunction:: paddle.fluid.layers.data :noindex: BlockGuardServ -------------- -.. autoclass:: paddle.v2.fluid.layers.BlockGuardServ +.. autoclass:: paddle.fluid.layers.BlockGuardServ :members: :noindex: ListenAndServ ------------- -.. autoclass:: paddle.v2.fluid.layers.ListenAndServ +.. autoclass:: paddle.fluid.layers.ListenAndServ :members: :noindex: Send ---- -.. autofunction:: paddle.v2.fluid.layers.Send +.. autofunction:: paddle.fluid.layers.Send :noindex: nn @@ -218,259 +218,259 @@ nn fc -- -.. autofunction:: paddle.v2.fluid.layers.fc +.. autofunction:: paddle.fluid.layers.fc :noindex: embedding --------- -.. autofunction:: paddle.v2.fluid.layers.embedding +.. autofunction:: paddle.fluid.layers.embedding :noindex: dynamic_lstm ------------ -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm +.. autofunction:: paddle.fluid.layers.dynamic_lstm :noindex: dynamic_lstmp ------------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp +.. autofunction:: paddle.fluid.layers.dynamic_lstmp :noindex: dynamic_gru ----------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_gru +.. autofunction:: paddle.fluid.layers.dynamic_gru :noindex: gru_unit -------- -.. autofunction:: paddle.v2.fluid.layers.gru_unit +.. autofunction:: paddle.fluid.layers.gru_unit :noindex: linear_chain_crf ---------------- -.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf +.. autofunction:: paddle.fluid.layers.linear_chain_crf :noindex: crf_decoding ------------ -.. 
autofunction:: paddle.v2.fluid.layers.crf_decoding +.. autofunction:: paddle.fluid.layers.crf_decoding :noindex: cos_sim ------- -.. autofunction:: paddle.v2.fluid.layers.cos_sim +.. autofunction:: paddle.fluid.layers.cos_sim :noindex: cross_entropy ------------- -.. autofunction:: paddle.v2.fluid.layers.cross_entropy +.. autofunction:: paddle.fluid.layers.cross_entropy :noindex: square_error_cost ----------------- -.. autofunction:: paddle.v2.fluid.layers.square_error_cost +.. autofunction:: paddle.fluid.layers.square_error_cost :noindex: accuracy -------- -.. autofunction:: paddle.v2.fluid.layers.accuracy +.. autofunction:: paddle.fluid.layers.accuracy :noindex: chunk_eval ---------- -.. autofunction:: paddle.v2.fluid.layers.chunk_eval +.. autofunction:: paddle.fluid.layers.chunk_eval :noindex: sequence_conv ------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_conv +.. autofunction:: paddle.fluid.layers.sequence_conv :noindex: conv2d ------ -.. autofunction:: paddle.v2.fluid.layers.conv2d +.. autofunction:: paddle.fluid.layers.conv2d :noindex: sequence_pool ------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_pool +.. autofunction:: paddle.fluid.layers.sequence_pool :noindex: pool2d ------ -.. autofunction:: paddle.v2.fluid.layers.pool2d +.. autofunction:: paddle.fluid.layers.pool2d :noindex: batch_norm ---------- -.. autofunction:: paddle.v2.fluid.layers.batch_norm +.. autofunction:: paddle.fluid.layers.batch_norm :noindex: layer_norm ---------- -.. autofunction:: paddle.v2.fluid.layers.layer_norm +.. autofunction:: paddle.fluid.layers.layer_norm :noindex: beam_search_decode ------------------ -.. autofunction:: paddle.v2.fluid.layers.beam_search_decode +.. autofunction:: paddle.fluid.layers.beam_search_decode :noindex: conv2d_transpose ---------------- -.. autofunction:: paddle.v2.fluid.layers.conv2d_transpose +.. autofunction:: paddle.fluid.layers.conv2d_transpose :noindex: sequence_expand --------------- -.. 
autofunction:: paddle.v2.fluid.layers.sequence_expand +.. autofunction:: paddle.fluid.layers.sequence_expand :noindex: lstm_unit --------- -.. autofunction:: paddle.v2.fluid.layers.lstm_unit +.. autofunction:: paddle.fluid.layers.lstm_unit :noindex: reduce_sum ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_sum +.. autofunction:: paddle.fluid.layers.reduce_sum :noindex: reduce_mean ----------- -.. autofunction:: paddle.v2.fluid.layers.reduce_mean +.. autofunction:: paddle.fluid.layers.reduce_mean :noindex: reduce_max ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_max +.. autofunction:: paddle.fluid.layers.reduce_max :noindex: reduce_min ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_min +.. autofunction:: paddle.fluid.layers.reduce_min :noindex: sequence_first_step ------------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_first_step +.. autofunction:: paddle.fluid.layers.sequence_first_step :noindex: sequence_last_step ------------------ -.. autofunction:: paddle.v2.fluid.layers.sequence_last_step +.. autofunction:: paddle.fluid.layers.sequence_last_step :noindex: dropout ------- -.. autofunction:: paddle.v2.fluid.layers.dropout +.. autofunction:: paddle.fluid.layers.dropout :noindex: split ----- -.. autofunction:: paddle.v2.fluid.layers.split +.. autofunction:: paddle.fluid.layers.split :noindex: ctc_greedy_decoder ------------------ -.. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder +.. autofunction:: paddle.fluid.layers.ctc_greedy_decoder :noindex: edit_distance ------------- -.. autofunction:: paddle.v2.fluid.layers.edit_distance +.. autofunction:: paddle.fluid.layers.edit_distance :noindex: l2_normalize ------------ -.. autofunction:: paddle.v2.fluid.layers.l2_normalize +.. autofunction:: paddle.fluid.layers.l2_normalize :noindex: matmul ------ -.. autofunction:: paddle.v2.fluid.layers.matmul +.. autofunction:: paddle.fluid.layers.matmul :noindex: warpctc ------- -.. 
autofunction:: paddle.v2.fluid.layers.warpctc +.. autofunction:: paddle.fluid.layers.warpctc :noindex: sequence_reshape ---------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_reshape +.. autofunction:: paddle.fluid.layers.sequence_reshape :noindex: transpose --------- -.. autofunction:: paddle.v2.fluid.layers.transpose +.. autofunction:: paddle.fluid.layers.transpose :noindex: im2sequence ----------- -.. autofunction:: paddle.v2.fluid.layers.im2sequence +.. autofunction:: paddle.fluid.layers.im2sequence :noindex: nce --- -.. autofunction:: paddle.v2.fluid.layers.nce +.. autofunction:: paddle.fluid.layers.nce :noindex: beam_search ----------- -.. autofunction:: paddle.v2.fluid.layers.beam_search +.. autofunction:: paddle.fluid.layers.beam_search :noindex: row_conv -------- -.. autofunction:: paddle.v2.fluid.layers.row_conv +.. autofunction:: paddle.fluid.layers.row_conv :noindex: multiplex --------- -.. autofunction:: paddle.v2.fluid.layers.multiplex +.. autofunction:: paddle.fluid.layers.multiplex :noindex: ops @@ -479,259 +479,259 @@ ops mean ---- -.. autofunction:: paddle.v2.fluid.layers.mean +.. autofunction:: paddle.fluid.layers.mean :noindex: mul --- -.. autofunction:: paddle.v2.fluid.layers.mul +.. autofunction:: paddle.fluid.layers.mul :noindex: reshape ------- -.. autofunction:: paddle.v2.fluid.layers.reshape +.. autofunction:: paddle.fluid.layers.reshape :noindex: scale ----- -.. autofunction:: paddle.v2.fluid.layers.scale +.. autofunction:: paddle.fluid.layers.scale :noindex: sigmoid_cross_entropy_with_logits --------------------------------- -.. autofunction:: paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits +.. autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits :noindex: elementwise_add --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_add +.. autofunction:: paddle.fluid.layers.elementwise_add :noindex: elementwise_div --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_div +.. 
autofunction:: paddle.fluid.layers.elementwise_div :noindex: elementwise_sub --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_sub +.. autofunction:: paddle.fluid.layers.elementwise_sub :noindex: elementwise_mul --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_mul +.. autofunction:: paddle.fluid.layers.elementwise_mul :noindex: elementwise_max --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_max +.. autofunction:: paddle.fluid.layers.elementwise_max :noindex: elementwise_min --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_min +.. autofunction:: paddle.fluid.layers.elementwise_min :noindex: elementwise_pow --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_pow +.. autofunction:: paddle.fluid.layers.elementwise_pow :noindex: clip ---- -.. autofunction:: paddle.v2.fluid.layers.clip +.. autofunction:: paddle.fluid.layers.clip :noindex: clip_by_norm ------------ -.. autofunction:: paddle.v2.fluid.layers.clip_by_norm +.. autofunction:: paddle.fluid.layers.clip_by_norm :noindex: sequence_softmax ---------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_softmax +.. autofunction:: paddle.fluid.layers.sequence_softmax :noindex: sigmoid ------- -.. autofunction:: paddle.v2.fluid.layers.sigmoid +.. autofunction:: paddle.fluid.layers.sigmoid :noindex: logsigmoid ---------- -.. autofunction:: paddle.v2.fluid.layers.logsigmoid +.. autofunction:: paddle.fluid.layers.logsigmoid :noindex: exp --- -.. autofunction:: paddle.v2.fluid.layers.exp +.. autofunction:: paddle.fluid.layers.exp :noindex: relu ---- -.. autofunction:: paddle.v2.fluid.layers.relu +.. autofunction:: paddle.fluid.layers.relu :noindex: tanh ---- -.. autofunction:: paddle.v2.fluid.layers.tanh +.. autofunction:: paddle.fluid.layers.tanh :noindex: tanh_shrink ----------- -.. autofunction:: paddle.v2.fluid.layers.tanh_shrink +.. 
autofunction:: paddle.fluid.layers.tanh_shrink :noindex: softshrink ---------- -.. autofunction:: paddle.v2.fluid.layers.softshrink +.. autofunction:: paddle.fluid.layers.softshrink :noindex: sqrt ---- -.. autofunction:: paddle.v2.fluid.layers.sqrt +.. autofunction:: paddle.fluid.layers.sqrt :noindex: abs --- -.. autofunction:: paddle.v2.fluid.layers.abs +.. autofunction:: paddle.fluid.layers.abs :noindex: ceil ---- -.. autofunction:: paddle.v2.fluid.layers.ceil +.. autofunction:: paddle.fluid.layers.ceil :noindex: floor ----- -.. autofunction:: paddle.v2.fluid.layers.floor +.. autofunction:: paddle.fluid.layers.floor :noindex: round ----- -.. autofunction:: paddle.v2.fluid.layers.round +.. autofunction:: paddle.fluid.layers.round :noindex: reciprocal ---------- -.. autofunction:: paddle.v2.fluid.layers.reciprocal +.. autofunction:: paddle.fluid.layers.reciprocal :noindex: log --- -.. autofunction:: paddle.v2.fluid.layers.log +.. autofunction:: paddle.fluid.layers.log :noindex: square ------ -.. autofunction:: paddle.v2.fluid.layers.square +.. autofunction:: paddle.fluid.layers.square :noindex: softplus -------- -.. autofunction:: paddle.v2.fluid.layers.softplus +.. autofunction:: paddle.fluid.layers.softplus :noindex: softsign -------- -.. autofunction:: paddle.v2.fluid.layers.softsign +.. autofunction:: paddle.fluid.layers.softsign :noindex: brelu ----- -.. autofunction:: paddle.v2.fluid.layers.brelu +.. autofunction:: paddle.fluid.layers.brelu :noindex: leaky_relu ---------- -.. autofunction:: paddle.v2.fluid.layers.leaky_relu +.. autofunction:: paddle.fluid.layers.leaky_relu :noindex: soft_relu --------- -.. autofunction:: paddle.v2.fluid.layers.soft_relu +.. autofunction:: paddle.fluid.layers.soft_relu :noindex: elu --- -.. autofunction:: paddle.v2.fluid.layers.elu +.. autofunction:: paddle.fluid.layers.elu :noindex: relu6 ----- -.. autofunction:: paddle.v2.fluid.layers.relu6 +.. autofunction:: paddle.fluid.layers.relu6 :noindex: pow --- -.. 
autofunction:: paddle.v2.fluid.layers.pow +.. autofunction:: paddle.fluid.layers.pow :noindex: stanh ----- -.. autofunction:: paddle.v2.fluid.layers.stanh +.. autofunction:: paddle.fluid.layers.stanh :noindex: hard_shrink ----------- -.. autofunction:: paddle.v2.fluid.layers.hard_shrink +.. autofunction:: paddle.fluid.layers.hard_shrink :noindex: thresholded_relu ---------------- -.. autofunction:: paddle.v2.fluid.layers.thresholded_relu +.. autofunction:: paddle.fluid.layers.thresholded_relu :noindex: hard_sigmoid ------------ -.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid +.. autofunction:: paddle.fluid.layers.hard_sigmoid :noindex: swish ----- -.. autofunction:: paddle.v2.fluid.layers.swish +.. autofunction:: paddle.fluid.layers.swish :noindex: tensor @@ -740,66 +740,66 @@ tensor create_tensor ------------- -.. autofunction:: paddle.v2.fluid.layers.create_tensor +.. autofunction:: paddle.fluid.layers.create_tensor :noindex: create_parameter ---------------- -.. autofunction:: paddle.v2.fluid.layers.create_parameter +.. autofunction:: paddle.fluid.layers.create_parameter :noindex: create_global_var ----------------- -.. autofunction:: paddle.v2.fluid.layers.create_global_var +.. autofunction:: paddle.fluid.layers.create_global_var :noindex: cast ---- -.. autofunction:: paddle.v2.fluid.layers.cast +.. autofunction:: paddle.fluid.layers.cast :noindex: concat ------ -.. autofunction:: paddle.v2.fluid.layers.concat +.. autofunction:: paddle.fluid.layers.concat :noindex: sums ---- -.. autofunction:: paddle.v2.fluid.layers.sums +.. autofunction:: paddle.fluid.layers.sums :noindex: assign ------ -.. autofunction:: paddle.v2.fluid.layers.assign +.. autofunction:: paddle.fluid.layers.assign :noindex: fill_constant_batch_size_like ----------------------------- -.. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like +.. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like :noindex: fill_constant ------------- -.. 
autofunction:: paddle.v2.fluid.layers.fill_constant +.. autofunction:: paddle.fluid.layers.fill_constant :noindex: ones ---- -.. autofunction:: paddle.v2.fluid.layers.ones +.. autofunction:: paddle.fluid.layers.ones :noindex: zeros ----- -.. autofunction:: paddle.v2.fluid.layers.zeros +.. autofunction:: paddle.fluid.layers.zeros :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/nets.rst.txt b/develop/api_doc/_sources/v2/fluid/nets.rst.txt index 015581b7660..7ae3187304f 100644 --- a/develop/api_doc/_sources/v2/fluid/nets.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/nets.rst.txt @@ -8,24 +8,24 @@ nets simple_img_conv_pool -------------------- -.. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool +.. autofunction:: paddle.fluid.nets.simple_img_conv_pool :noindex: sequence_conv_pool ------------------ -.. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool +.. autofunction:: paddle.fluid.nets.sequence_conv_pool :noindex: glu --- -.. autofunction:: paddle.v2.fluid.nets.glu +.. autofunction:: paddle.fluid.nets.glu :noindex: scaled_dot_product_attention ---------------------------- -.. autofunction:: paddle.v2.fluid.nets.scaled_dot_product_attention +.. autofunction:: paddle.fluid.nets.scaled_dot_product_attention :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/optimizer.rst.txt b/develop/api_doc/_sources/v2/fluid/optimizer.rst.txt index 1691ebb9a7c..9b165f87045 100644 --- a/develop/api_doc/_sources/v2/fluid/optimizer.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/optimizer.rst.txt @@ -8,42 +8,42 @@ optimizer SGD --- -.. autoclass:: paddle.v2.fluid.optimizer.SGD +.. autoclass:: paddle.fluid.optimizer.SGD :members: :noindex: Momentum -------- -.. autoclass:: paddle.v2.fluid.optimizer.Momentum +.. autoclass:: paddle.fluid.optimizer.Momentum :members: :noindex: Adagrad ------- -.. autoclass:: paddle.v2.fluid.optimizer.Adagrad +.. autoclass:: paddle.fluid.optimizer.Adagrad :members: :noindex: Adam ---- -.. 
autoclass:: paddle.v2.fluid.optimizer.Adam +.. autoclass:: paddle.fluid.optimizer.Adam :members: :noindex: Adamax ------ -.. autoclass:: paddle.v2.fluid.optimizer.Adamax +.. autoclass:: paddle.fluid.optimizer.Adamax :members: :noindex: DecayedAdagrad -------------- -.. autoclass:: paddle.v2.fluid.optimizer.DecayedAdagrad +.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad :members: :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/param_attr.rst.txt b/develop/api_doc/_sources/v2/fluid/param_attr.rst.txt index 8083d0d858d..8e4ddb2b049 100644 --- a/develop/api_doc/_sources/v2/fluid/param_attr.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/param_attr.rst.txt @@ -8,14 +8,14 @@ param_attr ParamAttr --------- -.. autoclass:: paddle.v2.fluid.param_attr.ParamAttr +.. autoclass:: paddle.fluid.param_attr.ParamAttr :members: :noindex: WeightNormParamAttr ------------------- -.. autoclass:: paddle.v2.fluid.param_attr.WeightNormParamAttr +.. autoclass:: paddle.fluid.param_attr.WeightNormParamAttr :members: :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/profiler.rst.txt b/develop/api_doc/_sources/v2/fluid/profiler.rst.txt index 4a1ff7cb697..74d102dcb0d 100644 --- a/develop/api_doc/_sources/v2/fluid/profiler.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/profiler.rst.txt @@ -8,18 +8,18 @@ profiler cuda_profiler ------------- -.. autofunction:: paddle.v2.fluid.profiler.cuda_profiler +.. autofunction:: paddle.fluid.profiler.cuda_profiler :noindex: reset_profiler -------------- -.. autofunction:: paddle.v2.fluid.profiler.reset_profiler +.. autofunction:: paddle.fluid.profiler.reset_profiler :noindex: profiler -------- -.. autofunction:: paddle.v2.fluid.profiler.profiler +.. 
autofunction:: paddle.fluid.profiler.profiler :noindex: diff --git a/develop/api_doc/_sources/v2/fluid/regularizer.rst.txt b/develop/api_doc/_sources/v2/fluid/regularizer.rst.txt index 2c17d15599b..dc9740c4639 100644 --- a/develop/api_doc/_sources/v2/fluid/regularizer.rst.txt +++ b/develop/api_doc/_sources/v2/fluid/regularizer.rst.txt @@ -8,20 +8,20 @@ regularizer append_regularization_ops ------------------------- -.. autofunction:: paddle.v2.fluid.regularizer.append_regularization_ops +.. autofunction:: paddle.fluid.regularizer.append_regularization_ops :noindex: L1Decay ------- -.. autoclass:: paddle.v2.fluid.regularizer.L1Decay +.. autoclass:: paddle.fluid.regularizer.L1Decay :members: :noindex: L2Decay ------- -.. autoclass:: paddle.v2.fluid.regularizer.L2Decay +.. autoclass:: paddle.fluid.regularizer.L2Decay :members: :noindex: diff --git a/develop/api_doc/v2/fluid/data_feeder.html b/develop/api_doc/v2/fluid/data_feeder.html index 5f492de8fed..7083f753e26 100644 --- a/develop/api_doc/v2/fluid/data_feeder.html +++ b/develop/api_doc/v2/fluid/data_feeder.html @@ -179,7 +179,7 @@

DataFeeder

-class paddle.v2.fluid.data_feeder.DataFeeder(feed_list, place, program=None)
+class paddle.fluid.data_feeder.DataFeeder(feed_list, place, program=None)
diff --git a/develop/api_doc/v2/fluid/evaluator.html b/develop/api_doc/v2/fluid/evaluator.html index 31d88d40c33..16b4f4576e9 100644 --- a/develop/api_doc/v2/fluid/evaluator.html +++ b/develop/api_doc/v2/fluid/evaluator.html @@ -179,7 +179,7 @@

Accuracy

-class paddle.v2.fluid.evaluator.Accuracy(input, label, k=1, **kwargs)
+class paddle.fluid.evaluator.Accuracy(input, label, k=1, **kwargs)

Average Accuracy for multiple mini-batches.

@@ -188,7 +188,7 @@

ChunkEvaluator

-class paddle.v2.fluid.evaluator.ChunkEvaluator(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None)
+class paddle.fluid.evaluator.ChunkEvaluator(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None)

Accumulate counter numbers output by chunk_eval from mini-batches and compute the precision recall and F1-score using the accumulated counter numbers.

diff --git a/develop/api_doc/v2/fluid/executor.html b/develop/api_doc/v2/fluid/executor.html index 2030c379433..3d2dc7a512f 100644 --- a/develop/api_doc/v2/fluid/executor.html +++ b/develop/api_doc/v2/fluid/executor.html @@ -179,7 +179,7 @@

Executor

-class paddle.v2.fluid.executor.Executor(places)
+class paddle.fluid.executor.Executor(places)
@@ -187,7 +187,7 @@

global_scope

-paddle.v2.fluid.executor.global_scope()
+paddle.fluid.executor.global_scope()
@@ -195,7 +195,7 @@

scope_guard

-paddle.v2.fluid.executor.scope_guard(*args, **kwds)
+paddle.fluid.executor.scope_guard(*args, **kwds)
@@ -203,7 +203,7 @@

switch_scope

-paddle.v2.fluid.executor.switch_scope(scope)
+paddle.fluid.executor.switch_scope(scope)
diff --git a/develop/api_doc/v2/fluid/initializer.html b/develop/api_doc/v2/fluid/initializer.html index 2633f09debc..f6e14e4d1d2 100644 --- a/develop/api_doc/v2/fluid/initializer.html +++ b/develop/api_doc/v2/fluid/initializer.html @@ -179,7 +179,7 @@

Constant

-paddle.v2.fluid.initializer.Constant
+paddle.fluid.initializer.Constant

alias of ConstantInitializer

@@ -188,7 +188,7 @@

Uniform

-paddle.v2.fluid.initializer.Uniform
+paddle.fluid.initializer.Uniform

alias of UniformInitializer

@@ -197,7 +197,7 @@

Normal

-paddle.v2.fluid.initializer.Normal
+paddle.fluid.initializer.Normal

alias of NormalInitializer

@@ -206,7 +206,7 @@

Xavier

-paddle.v2.fluid.initializer.Xavier
+paddle.fluid.initializer.Xavier

alias of XavierInitializer

diff --git a/develop/api_doc/v2/fluid/io.html b/develop/api_doc/v2/fluid/io.html index 6b3c1df88db..c4be3fcfdb3 100644 --- a/develop/api_doc/v2/fluid/io.html +++ b/develop/api_doc/v2/fluid/io.html @@ -178,7 +178,7 @@

save_vars

-paddle.v2.fluid.io.save_vars(executor, dirname, main_program=None, vars=None, predicate=None, save_file_name=None)
+paddle.fluid.io.save_vars(executor, dirname, main_program=None, vars=None, predicate=None, save_file_name=None)

Save variables to directory by executor.

@@ -215,7 +215,7 @@ If it is None, save variables to separate files.

save_params

-paddle.v2.fluid.io.save_params(executor, dirname, main_program=None, save_file_name=None)
+paddle.fluid.io.save_params(executor, dirname, main_program=None, save_file_name=None)

Save all parameters to directory with executor.

@@ -224,7 +224,7 @@ If it is None, save variables to separate files.

save_persistables

-paddle.v2.fluid.io.save_persistables(executor, dirname, main_program=None, save_file_name=None)
+paddle.fluid.io.save_persistables(executor, dirname, main_program=None, save_file_name=None)

Save all persistables to directory with executor.

@@ -233,7 +233,7 @@ If it is None, save variables to separate files.

load_vars

-paddle.v2.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, load_file_name=None)
+paddle.fluid.io.load_vars(executor, dirname, main_program=None, vars=None, predicate=None, load_file_name=None)

Load variables from directory by executor.

@@ -270,7 +270,7 @@ If it is None, load variables from separate files.

load_params

-paddle.v2.fluid.io.load_params(executor, dirname, main_program=None, load_file_name=None)
+paddle.fluid.io.load_params(executor, dirname, main_program=None, load_file_name=None)

load all parameters from directory by executor.

@@ -279,7 +279,7 @@ If it is None, load variables from separate files.

load_persistables

-paddle.v2.fluid.io.load_persistables(executor, dirname, main_program=None, load_file_name=None)
+paddle.fluid.io.load_persistables(executor, dirname, main_program=None, load_file_name=None)

load all persistables from directory by executor.

@@ -288,7 +288,7 @@ If it is None, load variables from separate files.

save_inference_model

-paddle.v2.fluid.io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, save_file_name=None)
+paddle.fluid.io.save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program=None, save_file_name=None)

Build a model especially for inference, and save it to directory by the executor.

@@ -324,7 +324,7 @@ Default default_main_program().

load_inference_model

-paddle.v2.fluid.io.load_inference_model(dirname, executor, load_file_name=None)
+paddle.fluid.io.load_inference_model(dirname, executor, load_file_name=None)

Load inference model from a directory

@@ -358,7 +358,7 @@ fetch_targets: Variables from which we can get inference results.

get_inference_program

-paddle.v2.fluid.io.get_inference_program(target_vars, main_program=None)
+paddle.fluid.io.get_inference_program(target_vars, main_program=None)
diff --git a/develop/api_doc/v2/fluid/layers.html b/develop/api_doc/v2/fluid/layers.html index 655290e5b56..bc65c74cabe 100644 --- a/develop/api_doc/v2/fluid/layers.html +++ b/develop/api_doc/v2/fluid/layers.html @@ -181,7 +181,7 @@

split_lod_tensor

-paddle.v2.fluid.layers.split_lod_tensor(input, mask, level=0)
+paddle.fluid.layers.split_lod_tensor(input, mask, level=0)

split_lod_tensor

This function takes in an input that contains the complete lod information, and takes in a mask which is used to mask certain parts of the input. @@ -226,7 +226,7 @@ Variable: The false branch of tensor as per the mask applied to input.

merge_lod_tensor

-paddle.v2.fluid.layers.merge_lod_tensor(in_true, in_false, x, mask, level=0)
+paddle.fluid.layers.merge_lod_tensor(in_true, in_false, x, mask, level=0)

merge_lod_tensor

This function takes in an input \(x\), the True branch, the False branch and a binary \(mask\). Using this information, this function @@ -275,7 +275,7 @@ lod information needed to construct the output.

BlockGuard

-class paddle.v2.fluid.layers.BlockGuard(main_program)
+class paddle.fluid.layers.BlockGuard(main_program)

BlockGuard class.

BlockGuard class is used to create a sub-block in a program by using the Python with keyword.

@@ -286,7 +286,7 @@ using the Python with keyword.

BlockGuardWithCompletion

-class paddle.v2.fluid.layers.BlockGuardWithCompletion(rnn)
+class paddle.fluid.layers.BlockGuardWithCompletion(rnn)

BlockGuardWithCompletion class.

BlockGuardWithCompletion class is used to create an op with a block in a program.

@@ -296,7 +296,7 @@ using the Python with keyword.

StaticRNNMemoryLink

-class paddle.v2.fluid.layers.StaticRNNMemoryLink(init, pre_mem, mem=None)
+class paddle.fluid.layers.StaticRNNMemoryLink(init, pre_mem, mem=None)

StaticRNNMemoryLink class.

@@ -323,7 +323,7 @@ memory cells of a StaticRNN.

WhileGuard

-class paddle.v2.fluid.layers.WhileGuard(while_op)
+class paddle.fluid.layers.WhileGuard(while_op)
@@ -331,7 +331,7 @@ memory cells of a StaticRNN.

While

-class paddle.v2.fluid.layers.While(cond, name=None)
+class paddle.fluid.layers.While(cond, name=None)
@@ -339,7 +339,7 @@ memory cells of a StaticRNN.

lod_rank_table

-paddle.v2.fluid.layers.lod_rank_table(x, level=0)
+paddle.fluid.layers.lod_rank_table(x, level=0)

LoD Rank Table Operator. Given an input variable x and a level number of LoD, this layer creates a LodRankTable object. A LoDRankTable object contains a list of bi-element tuples. Each tuple consists of an index and @@ -402,7 +402,7 @@ table.

max_sequence_len

-paddle.v2.fluid.layers.max_sequence_len(rank_table)
+paddle.fluid.layers.max_sequence_len(rank_table)

Max Sequence Len Operator. Given a LoDRankTable object, this layer returns the max length of a batch of sequences. In fact, a LoDRankTable object contains a list of tuples(<sequence index, sequence length>) and @@ -434,7 +434,7 @@ operator just returns the sequence length of the first tuple element.

topk

-paddle.v2.fluid.layers.topk(input, k)
+paddle.fluid.layers.topk(input, k)

topk

This function performs the operation that selects the k entries in the input vector and outputs their values and indices as vectors. Thus topk_out[j] is @@ -478,7 +478,7 @@ the j-th largest entry in input, and its index is topk_indices[j]

lod_tensor_to_array

-paddle.v2.fluid.layers.lod_tensor_to_array(x, table)
+paddle.fluid.layers.lod_tensor_to_array(x, table)

Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.

@@ -518,7 +518,7 @@ descending order.

array_to_lod_tensor

-paddle.v2.fluid.layers.array_to_lod_tensor(x, table)
+paddle.fluid.layers.array_to_lod_tensor(x, table)

Convert a LoD_Tensor_Array to an LoDTensor.

@@ -559,7 +559,7 @@ descending order.

increment

-paddle.v2.fluid.layers.increment(x, value=1.0, in_place=True)
+paddle.fluid.layers.increment(x, value=1.0, in_place=True)

This function performs an operation that increments each value in the input \(x\) by an amount: \(value\) as mentioned in the input parameter. This operation is performed in-place by default.

@@ -599,7 +599,7 @@ parameter. This operation is performed in-place by default.

array_write

-paddle.v2.fluid.layers.array_write(x, i, array=None)
+paddle.fluid.layers.array_write(x, i, array=None)

This function writes the given input variable to the specified position indicated by the array index to an output LOD_TENSOR_ARRAY. If the output LOD_TENSOR_ARRAY is not given (None), a new one will be created and @@ -636,7 +636,7 @@ returned.

create_array

-paddle.v2.fluid.layers.create_array(dtype)
+paddle.fluid.layers.create_array(dtype)

This function creates an array of type \(LOD_TENSOR_ARRAY\) using the LayerHelper.

@@ -662,7 +662,7 @@ LayerHelper.

less_than

-paddle.v2.fluid.layers.less_than(x, y, cond=None, **ignored)
+paddle.fluid.layers.less_than(x, y, cond=None, **ignored)

Less than

This layer returns the truth value of \(x < y\) elementwise.

@@ -695,7 +695,7 @@ LayerHelper.

array_read

-paddle.v2.fluid.layers.array_read(array, i)
+paddle.fluid.layers.array_read(array, i)

This function performs the operation to read the data in as an LOD_TENSOR_ARRAY. :param array: The input tensor that will be written to an array. @@ -721,7 +721,7 @@ LOD_TENSOR_ARRAY.

shrink_memory

-paddle.v2.fluid.layers.shrink_memory(x, i, table)
+paddle.fluid.layers.shrink_memory(x, i, table)

This function creates an operator to shrink_rnn_memory using the RankTable as mentioned in the input parameter.

@@ -731,7 +731,7 @@ as mentioned in the input parameter.

array_length

-paddle.v2.fluid.layers.array_length(array)
+paddle.fluid.layers.array_length(array)

This function performs the operation to find the length of the input LOD_TENSOR_ARRAY.

@@ -755,7 +755,7 @@ to compute the length.

IfElse

-class paddle.v2.fluid.layers.IfElse(cond, name=None)
+class paddle.fluid.layers.IfElse(cond, name=None)
@@ -763,7 +763,7 @@ to compute the length.

DynamicRNN

-class paddle.v2.fluid.layers.DynamicRNN(name=None)
+class paddle.fluid.layers.DynamicRNN(name=None)
@@ -771,7 +771,7 @@ to compute the length.

ConditionalBlock

-class paddle.v2.fluid.layers.ConditionalBlock(inputs, is_scalar_condition=False, name=None)
+class paddle.fluid.layers.ConditionalBlock(inputs, is_scalar_condition=False, name=None)
@@ -779,7 +779,7 @@ to compute the length.

StaticRNN

-class paddle.v2.fluid.layers.StaticRNN(name=None)
+class paddle.fluid.layers.StaticRNN(name=None)

StaticRNN class.

StaticRNN class is used to create a StaticRNN. The RNN will have its own parameters like inputs, outputs, memories, status and length.

@@ -811,7 +811,7 @@ own parameters like inputs, outputs, memories, status and length.

reorder_lod_tensor_by_rank

-paddle.v2.fluid.layers.reorder_lod_tensor_by_rank(x, rank_table)
+paddle.fluid.layers.reorder_lod_tensor_by_rank(x, rank_table)

ReorderLoDTensorByRankTable operator.

Input(X) is a batch of sequences. Input(RankTable) stores new orders of the input sequence batch. The reorder_lod_tensor_by_rank operator reorders the @@ -859,7 +859,7 @@ Duplicable: False Optional: False

ParallelDo

-class paddle.v2.fluid.layers.ParallelDo(places, use_nccl=False, name=None)
+class paddle.fluid.layers.ParallelDo(places, use_nccl=False, name=None)

ParallelDo class.

ParallelDo class is used to create a ParallelDo.

@@ -869,7 +869,7 @@ Duplicable: False Optional: False

Print

-paddle.v2.fluid.layers.Print(input, first_n=-1, message=None, summarize=-1, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, print_tensor_lod=True, print_phase='both')
+paddle.fluid.layers.Print(input, first_n=-1, message=None, summarize=-1, print_tensor_name=True, print_tensor_type=True, print_tensor_shape=True, print_tensor_lod=True, print_phase='both')

Print operator

This creates a print op that will print when a tensor is accessed.

Wraps the tensor passed in so that whenever that a tensor is accessed, @@ -921,7 +921,7 @@ Print(value, summarize=10,

get_places

-paddle.v2.fluid.layers.get_places(device_count=None, device_type=None)
+paddle.fluid.layers.get_places(device_count=None, device_type=None)

Returns a list of places based on flags. The list will be used for parallel execution.

@@ -949,7 +949,7 @@ execution.

data

-paddle.v2.fluid.layers.data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=VarType.LOD_TENSOR, stop_gradient=True)
+paddle.fluid.layers.data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=VarType.LOD_TENSOR, stop_gradient=True)

Data Layer

This function takes in the input and based on whether data has to be returned back as a minibatch, it creates the global variable by using @@ -993,7 +993,7 @@ to the LayerHelper constructor.

BlockGuardServ

-class paddle.v2.fluid.layers.BlockGuardServ(server)
+class paddle.fluid.layers.BlockGuardServ(server)

BlockGuardServ class.

BlockGuardServ class is used to create an op with a block in a program.

@@ -1003,7 +1003,7 @@ to the LayerHelper constructor.

ListenAndServ

-class paddle.v2.fluid.layers.ListenAndServ(endpoint, fan_in=1, optimizer_mode=True)
+class paddle.fluid.layers.ListenAndServ(endpoint, fan_in=1, optimizer_mode=True)

ListenAndServ class.

ListenAndServ class is used to wrap listen_and_serv op to create a server which can receive variables from clients and run a block.

@@ -1014,7 +1014,7 @@ which can receive variables from clients and run a block.

Send

-paddle.v2.fluid.layers.Send(endpoints, send_vars, get_vars)
+paddle.fluid.layers.Send(endpoints, send_vars, get_vars)

Send layer

@@ -1042,7 +1042,7 @@ side when server have finished running server side program.

fc

-paddle.v2.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None)
+paddle.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None)

Fully Connected Layer

The fully connected layer can take multiple tensors as its inputs. It creates a variable (one for each input tensor) called weights for each @@ -1130,7 +1130,7 @@ layer.

embedding

-paddle.v2.fluid.layers.embedding(input, size, is_sparse=False, padding_idx=None, param_attr=None, dtype='float32')
+paddle.fluid.layers.embedding(input, size, is_sparse=False, padding_idx=None, param_attr=None, dtype='float32')

Embedding Layer

This layer is used to lookup embeddings of IDs, provided by input, in a lookup table. The result of this lookup is the embedding of each ID in the @@ -1178,7 +1178,7 @@ with zeros whenever lookup encounters it in

-paddle.v2.fluid.layers.dynamic_lstm(input, size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', dtype='float32', name=None)
+paddle.fluid.layers.dynamic_lstm(input, size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', dtype='float32', name=None)

Dynamic LSTM Layer

The default implementation is diagonal/peephole connection (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows:

@@ -1285,7 +1285,7 @@ will be named automatically.

dynamic_lstmp

-paddle.v2.fluid.layers.dynamic_lstmp(input, size, proj_size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', dtype='float32', name=None)
+paddle.fluid.layers.dynamic_lstmp(input, size, proj_size, param_attr=None, bias_attr=None, use_peepholes=True, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', proj_activation='tanh', dtype='float32', name=None)

Dynamic LSTMP Layer

LSTMP (LSTM with recurrent projection) layer has a separate projection layer after the LSTM layer, projecting the original hidden state to a @@ -1410,7 +1410,7 @@ will be named automatically.

dynamic_gru

-paddle.v2.fluid.layers.dynamic_gru(input, size, param_attr=None, bias_attr=None, is_reverse=False, gate_activation='sigmoid', candidate_activation='tanh', h_0=None)
+paddle.fluid.layers.dynamic_gru(input, size, param_attr=None, bias_attr=None, is_reverse=False, gate_activation='sigmoid', candidate_activation='tanh', h_0=None)

Dynamic GRU Layer

Refer to Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling

@@ -1478,7 +1478,7 @@ Choices = [“sigmoid”, “tanh”, “relu”, “

gru_unit

-paddle.v2.fluid.layers.gru_unit(input, hidden, size, weight=None, bias=None, activation='tanh', gate_activation='sigmoid')
+paddle.fluid.layers.gru_unit(input, hidden, size, weight=None, bias=None, activation='tanh', gate_activation='sigmoid')

GRU unit layer. The equation of a gru step is:

@@ -1533,7 +1533,7 @@ Default: ‘sigmoid’

linear_chain_crf

-paddle.v2.fluid.layers.linear_chain_crf(input, label, param_attr=None)
+paddle.fluid.layers.linear_chain_crf(input, label, param_attr=None)
@@ -1541,7 +1541,7 @@ Default: ‘sigmoid’

crf_decoding

-paddle.v2.fluid.layers.crf_decoding(input, param_attr, label=None)
+paddle.fluid.layers.crf_decoding(input, param_attr, label=None)
@@ -1549,7 +1549,7 @@ Default: ‘sigmoid’

cos_sim

-paddle.v2.fluid.layers.cos_sim(X, Y)
+paddle.fluid.layers.cos_sim(X, Y)

This function performs the cosine similarity between two tensors X and Y and returns that as the output.

@@ -1559,7 +1559,7 @@ X and Y and returns that as the output.

cross_entropy

-paddle.v2.fluid.layers.cross_entropy(input, label, soft_label=False)
+paddle.fluid.layers.cross_entropy(input, label, soft_label=False)

Cross Entropy Layer

This layer computes the cross entropy between input and label. It supports both standard cross-entropy and soft-label cross-entropy loss @@ -1642,7 +1642,7 @@ labels, default False.

square_error_cost

-paddle.v2.fluid.layers.square_error_cost(input, label)
+paddle.fluid.layers.square_error_cost(input, label)

Square error cost layer

This layer accepts input predictions and target label and returns the squared error cost.

@@ -1688,7 +1688,7 @@ squared error cost.

accuracy

-paddle.v2.fluid.layers.accuracy(input, label, k=1, correct=None, total=None)
+paddle.fluid.layers.accuracy(input, label, k=1, correct=None, total=None)

This function computes the accuracy using the input and label. The output is the top_k inputs and their indices.

@@ -1698,7 +1698,7 @@ The output is the top_k inputs and their indices.

chunk_eval

-paddle.v2.fluid.layers.chunk_eval(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None)
+paddle.fluid.layers.chunk_eval(input, label, chunk_scheme, num_chunk_types, excluded_chunk_types=None)

This function computes and outputs the precision, recall and F1-score of chunk detection.

@@ -1708,7 +1708,7 @@ F1-score of chunk detection.

sequence_conv

-paddle.v2.fluid.layers.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=None, bias_attr=None, param_attr=None, act=None)
+paddle.fluid.layers.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=None, bias_attr=None, param_attr=None, act=None)

This function creates the op for sequence_conv, using the inputs and other convolutional configurations for the filters and stride as given in the input parameters to the function.

@@ -1719,7 +1719,7 @@ in the input parameters to the function.

conv2d

-paddle.v2.fluid.layers.conv2d(input, num_filters, filter_size, stride=None, padding=None, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None)
+paddle.fluid.layers.conv2d(input, num_filters, filter_size, stride=None, padding=None, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None)

Convolution2D Layer

The convolution2D layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input(Input) and @@ -1816,7 +1816,7 @@ groups mismatch.

sequence_pool

-paddle.v2.fluid.layers.sequence_pool(input, pool_type)
+paddle.fluid.layers.sequence_pool(input, pool_type)

This function add the operator for sequence pooling. It pools features of all time-steps of each instance, and is applied on top of the input using pool_type mentioned in the parameters.

@@ -1876,7 +1876,7 @@ It supports average, sum, sqrt and max.

pool2d

-paddle.v2.fluid.layers.pool2d(input, pool_size, pool_type, pool_stride=None, pool_padding=None, global_pooling=False, use_cudnn=True, name=None)
+paddle.fluid.layers.pool2d(input, pool_size, pool_type, pool_stride=None, pool_padding=None, global_pooling=False, use_cudnn=True, name=None)

This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters.

@@ -1886,7 +1886,7 @@ pooling configurations mentioned in input parameters.

batch_norm

-paddle.v2.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', name=None, moving_mean_name=None, moving_variance_name=None)
+paddle.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', name=None, moving_mean_name=None, moving_variance_name=None)

This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters.

@@ -1896,7 +1896,7 @@ the BatchNorm layer using the configurations from the input parameters.

layer_norm

-paddle.v2.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None)
+paddle.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None)

Layer Normalization

Assume feature vectors exist on dimensions begin_norm_axis ... rank(input) and calculate the moment statistics @@ -1951,7 +1951,7 @@ bias \(b\).

beam_search_decode

-paddle.v2.fluid.layers.beam_search_decode(ids, scores, name=None)
+paddle.fluid.layers.beam_search_decode(ids, scores, name=None)
@@ -1959,7 +1959,7 @@ bias \(b\).

conv2d_transpose

-paddle.v2.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=None, stride=None, dilation=None, param_attr=None, use_cudnn=True, name=None)
+paddle.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=None, stride=None, dilation=None, param_attr=None, use_cudnn=True, name=None)

Convolution2D transpose layer

The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) @@ -2056,7 +2056,7 @@ groups mismatch.

sequence_expand

-paddle.v2.fluid.layers.sequence_expand(x, y, name=None)
+paddle.fluid.layers.sequence_expand(x, y, name=None)

Sequence Expand Layer. This layer will expand the input variable x according to LoD information of y. And the following examples will explain how sequence_expand works:

@@ -2129,7 +2129,7 @@ will be named automatically.

lstm_unit

-paddle.v2.fluid.layers.lstm_unit(x_t, hidden_t_prev, cell_t_prev, forget_bias=0.0, param_attr=None, bias_attr=None, name=None)
+paddle.fluid.layers.lstm_unit(x_t, hidden_t_prev, cell_t_prev, forget_bias=0.0, param_attr=None, bias_attr=None, name=None)

Lstm unit layer. The equation of a lstm step is:

@@ -2203,7 +2203,7 @@ and cell_t_prev not be the same or the 2nd dimensions of

reduce_sum

-paddle.v2.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None)
+paddle.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None)

Computes the sum of tensor elements over the given dimension.

@@ -2250,7 +2250,7 @@ will be named automatically.

reduce_mean

-paddle.v2.fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None)
+paddle.fluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None)

Computes the mean of tensor elements over the given dimension.

@@ -2297,7 +2297,7 @@ will be named automatically.

reduce_max

-paddle.v2.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None)
+paddle.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None)

Computes the maximum of tensor elements over the given dimension.

@@ -2344,7 +2344,7 @@ will be named automatically.

reduce_min

-paddle.v2.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None)
+paddle.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None)

Computes the minimum of tensor elements over the given dimension.

@@ -2391,7 +2391,7 @@ will be named automatically.

sequence_first_step

-paddle.v2.fluid.layers.sequence_first_step(input)
+paddle.fluid.layers.sequence_first_step(input)

This function gets the first step of a sequence.

x is a 1-level LoDTensor:
   x.lod = [[0, 2, 5, 7]]
@@ -2427,7 +2427,7 @@ then output is a Tensor:
 

sequence_last_step

-paddle.v2.fluid.layers.sequence_last_step(input)
+paddle.fluid.layers.sequence_last_step(input)

This function gets the last step of a sequence.

x is a 1-level LoDTensor:
   x.lod = [[0, 2, 5, 7]]
@@ -2463,7 +2463,7 @@ then output is a Tensor:
 

dropout

-paddle.v2.fluid.layers.dropout(x, dropout_prob, is_test=False, seed=None)
+paddle.fluid.layers.dropout(x, dropout_prob, is_test=False, seed=None)

Computes dropout.

Drop or keep each element of x independently. Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaption during @@ -2505,7 +2505,7 @@ units will be dropped. DO NOT use a fixed seed in training.

split

-paddle.v2.fluid.layers.split(input, num_or_sections, dim=-1, name=None)
+paddle.fluid.layers.split(input, num_or_sections, dim=-1, name=None)

Split the input tensor into multiple sub-tensors.

@@ -2553,7 +2553,7 @@ will be named automatically.

ctc_greedy_decoder

-paddle.v2.fluid.layers.ctc_greedy_decoder(input, blank, name=None)
+paddle.fluid.layers.ctc_greedy_decoder(input, blank, name=None)

This op is used to decode sequences by greedy policy by below steps: 1. Get the indexes of max value for each row in input. a.k.a.

@@ -2625,7 +2625,7 @@ empty, the result LoDTensor will be [-1] with LoD [[0]] and dims [1, 1].

edit_distance

-paddle.v2.fluid.layers.edit_distance(input, label, normalized=False, ignored_tokens=None, name=None)
+paddle.fluid.layers.edit_distance(input, label, normalized=False, ignored_tokens=None, name=None)

EditDistance operator computes the edit distances between a batch of hypothesis strings and their references. Edit distance, also called Levenshtein distance, measures how dissimilar two strings are by counting @@ -2678,7 +2678,7 @@ calculating edit distance.

l2_normalize

-paddle.v2.fluid.layers.l2_normalize(x, axis, epsilon=1e-12, name=None)
+paddle.fluid.layers.l2_normalize(x, axis, epsilon=1e-12, name=None)

L2 normalize Layer

The l2 normalize layer normalizes x along dimension axis using an L2 norm. For a 1-D tensor (dim is fixed to 0), this layer computes

@@ -2722,7 +2722,7 @@ will be named automatically.

matmul

-paddle.v2.fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, name=None)
+paddle.fluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, name=None)

Applies matrix multiplication to two tensors.

Currently, the input tensors’ rank can be any, but when the rank of any inputs is bigger than 3, this two inputs’ rank should be equal.

@@ -2800,7 +2800,7 @@ will be named automatically.

warpctc

-paddle.v2.fluid.layers.warpctc(input, label, blank=0, norm_by_times=False)
+paddle.fluid.layers.warpctc(input, label, blank=0, norm_by_times=False)

An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc) to compute Connectionist Temporal Classification (CTC) loss. @@ -2849,7 +2849,7 @@ which is a 2-D Tensor of the shape [batch_size, 1].

sequence_reshape

-paddle.v2.fluid.layers.sequence_reshape(input, new_dim)
+paddle.fluid.layers.sequence_reshape(input, new_dim)

Sequence Reshape Layer

This layer will rearrange the input sequences. The new dimension is set by user. Length of each sequence is computed according to original length, @@ -2905,7 +2905,7 @@ with shape being [N, M] where M for dimension.

transpose

-paddle.v2.fluid.layers.transpose(x, perm, name=None)
+paddle.fluid.layers.transpose(x, perm, name=None)

transpose Layer

Permute the dimensions of input according to perm.

The i-th dimension of the returned tensor will correspond to the @@ -2940,7 +2940,7 @@ perm[i]-th dimension of input.

im2sequence

-paddle.v2.fluid.layers.im2sequence(input, filter_size=1, stride=1, padding=0, name=None)
+paddle.fluid.layers.im2sequence(input, filter_size=1, stride=1, padding=0, name=None)

Extracts image patches from the input tensor to form a tensor of shape {input.batch_size * output_height * output_width, filter_size_H * filter_size_W * input.channels} which is similar with im2col. @@ -3045,7 +3045,7 @@ output.lod = [[0, 4, 8]]

nce

-paddle.v2.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None)
+paddle.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None)

Compute and return the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). By default this operator uses a uniform distribution for sampling.

@@ -3082,7 +3082,7 @@ Duplicable: False Optional: True

beam_search

-paddle.v2.fluid.layers.beam_search(pre_ids, ids, scores, beam_size, end_id, level=0)
+paddle.fluid.layers.beam_search(pre_ids, ids, scores, beam_size, end_id, level=0)

This function implements the beam search algorithm.

@@ -3091,7 +3091,7 @@ Duplicable: False Optional: True

row_conv

-paddle.v2.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None)
+paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None)

Row Conv Operator. This layer will apply lookahead convolution to input. The input variable should be a 2D LoDTensor with shape [T, D]. Parameters with shape [future_context_size + 1, D] will be created. The math @@ -3142,7 +3142,7 @@ name, initializer etc.

multiplex

-paddle.v2.fluid.layers.multiplex(inputs, index)
+paddle.fluid.layers.multiplex(inputs, index)

Multiplex Layer

Referring to the given index variable, this layer selects rows from the input variables to construct a multiplex variable. Assuming that there are @@ -3196,7 +3196,7 @@ with shape [M, 1] where M is the batch size.

mean

-paddle.v2.fluid.layers.mean(**kwargs)
+paddle.fluid.layers.mean(**kwargs)

Mean Operator.

Out is a scalar which is the mean of all elements in X.

@@ -3217,7 +3217,7 @@ Duplicable: False Optional: False

mul

-paddle.v2.fluid.layers.mul(**kwargs)
+paddle.fluid.layers.mul(**kwargs)

Mul Operator.

This operator is used to perform matrix multiplication for input $X$ and $Y$.

The equation is:

@@ -3268,7 +3268,7 @@ flattened. See comments of x_num_col_dims for more details.

reshape

-paddle.v2.fluid.layers.reshape(**kwargs)
+paddle.fluid.layers.reshape(**kwargs)

Reshape Operator.

Reshape Input(X) into the shape specified by Attr(shape).

An example: @@ -3301,7 +3301,7 @@ Duplicable: False Optional: False

scale

-paddle.v2.fluid.layers.scale(**kwargs)
+paddle.fluid.layers.scale(**kwargs)

Scale operator

$$Out = scale*X$$

@@ -3327,7 +3327,7 @@ Duplicable: False Optional: False

sigmoid_cross_entropy_with_logits

-paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits(**kwargs)
+paddle.fluid.layers.sigmoid_cross_entropy_with_logits(**kwargs)

SigmoidCrossEntropyWithLogits Operator.

This measures the element-wise probability error in classification tasks in which each class is independent. This can be thought of as predicting labels @@ -3370,7 +3370,7 @@ Duplicable: False Optional: False

elementwise_add

-paddle.v2.fluid.layers.elementwise_add(**kwargs)
+paddle.fluid.layers.elementwise_add(**kwargs)

Limited Elementwise Add Operator.

The equation is:

$$Out = X + Y$$

@@ -3420,7 +3420,7 @@ Duplicable: False Optional: False

elementwise_div

-paddle.v2.fluid.layers.elementwise_div(**kwargs)
+paddle.fluid.layers.elementwise_div(**kwargs)

Limited Elementwise Div Operator.

The equation is:

$$Out = X / Y$$

@@ -3470,7 +3470,7 @@ Duplicable: False Optional: False

elementwise_sub

-paddle.v2.fluid.layers.elementwise_sub(**kwargs)
+paddle.fluid.layers.elementwise_sub(**kwargs)

Limited Elementwise Sub Operator.

The equation is:

$$Out = X - Y$$

@@ -3520,7 +3520,7 @@ Duplicable: False Optional: False

elementwise_mul

-paddle.v2.fluid.layers.elementwise_mul(**kwargs)
+paddle.fluid.layers.elementwise_mul(**kwargs)

Limited Elementwise Mul Operator.

The equation is:

$$Out = X \odot Y$$

@@ -3570,7 +3570,7 @@ Duplicable: False Optional: False

elementwise_max

-paddle.v2.fluid.layers.elementwise_max(**kwargs)
+paddle.fluid.layers.elementwise_max(**kwargs)

Limited Elementwise Max Operator.

The equation is:

$$Out = max(X, Y)$$

@@ -3620,7 +3620,7 @@ Duplicable: False Optional: False

elementwise_min

-paddle.v2.fluid.layers.elementwise_min(**kwargs)
+paddle.fluid.layers.elementwise_min(**kwargs)

Limited Elementwise Min Operator.

The equation is:

$$Out = min(X, Y)$$

@@ -3670,7 +3670,7 @@ Duplicable: False Optional: False

elementwise_pow

-paddle.v2.fluid.layers.elementwise_pow(**kwargs)
+paddle.fluid.layers.elementwise_pow(**kwargs)

Limited Elementwise Pow Operator.

The equation is:

$$Out = X ^ Y$$

@@ -3720,7 +3720,7 @@ Duplicable: False Optional: False

clip

-paddle.v2.fluid.layers.clip(**kwargs)
+paddle.fluid.layers.clip(**kwargs)

Clip Operator.

The clip operator limits the value of given input within an interval. The interval is specified with arguments ‘min’ and ‘max’:

@@ -3751,7 +3751,7 @@ Duplicable: False Optional: False

clip_by_norm

-paddle.v2.fluid.layers.clip_by_norm(**kwargs)
+paddle.fluid.layers.clip_by_norm(**kwargs)

ClipByNorm Operator.

This operator limits the L2 norm of the input $X$ within $max_norm$. If the L2 norm of $X$ is less than or equal to $max_norm$, $Out$ will be @@ -3785,7 +3785,7 @@ Duplicable: False Optional: False

sequence_softmax

-paddle.v2.fluid.layers.sequence_softmax(**kwargs)
+paddle.fluid.layers.sequence_softmax(**kwargs)

Sequence Softmax Operator.

SequenceSoftmaxOp computes the softmax activation among all time-steps for each sequence. The dimension of each time-step should be 1. Thus, the shape of @@ -3819,7 +3819,7 @@ Duplicable: False Optional: False

sigmoid

-paddle.v2.fluid.layers.sigmoid(**kwargs)
+paddle.fluid.layers.sigmoid(**kwargs)

Sigmoid Activation Operator

$$out = \frac{1}{1 + e^{-x}}$$

@@ -3840,7 +3840,7 @@ Duplicable: False Optional: False

logsigmoid

-paddle.v2.fluid.layers.logsigmoid(**kwargs)
+paddle.fluid.layers.logsigmoid(**kwargs)

Logsigmoid Activation Operator

$$out = \log \frac{1}{1 + e^{-x}}$$

@@ -3861,7 +3861,7 @@ Duplicable: False Optional: False

exp

-paddle.v2.fluid.layers.exp(**kwargs)
+paddle.fluid.layers.exp(**kwargs)

Exp Activation Operator.

$out = e^x$

@@ -3882,7 +3882,7 @@ Duplicable: False Optional: False

relu

-paddle.v2.fluid.layers.relu(**kwargs)
+paddle.fluid.layers.relu(**kwargs)

Relu Activation Operator.

$out = max(x, 0)$

@@ -3903,7 +3903,7 @@ Duplicable: False Optional: False

tanh

-paddle.v2.fluid.layers.tanh(**kwargs)
+paddle.fluid.layers.tanh(**kwargs)

Tanh Activation Operator.

$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

@@ -3924,7 +3924,7 @@ Duplicable: False Optional: False

tanh_shrink

-paddle.v2.fluid.layers.tanh_shrink(**kwargs)
+paddle.fluid.layers.tanh_shrink(**kwargs)

TanhShrink Activation Operator.

$$out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$

@@ -3945,7 +3945,7 @@ Duplicable: False Optional: False

softshrink

-paddle.v2.fluid.layers.softshrink(**kwargs)
+paddle.fluid.layers.softshrink(**kwargs)

Softshrink Activation Operator.

$$ out = \begin{cases}

@@ -3978,7 +3978,7 @@ Duplicable: False Optional: False

sqrt

-paddle.v2.fluid.layers.sqrt(**kwargs)
+paddle.fluid.layers.sqrt(**kwargs)

Sqrt Activation Operator.

$out = \sqrt{x}$

@@ -3999,7 +3999,7 @@ Duplicable: False Optional: False

abs

-paddle.v2.fluid.layers.abs(**kwargs)
+paddle.fluid.layers.abs(**kwargs)

Abs Activation Operator.

$out = |x|$

@@ -4020,7 +4020,7 @@ Duplicable: False Optional: False

ceil

-paddle.v2.fluid.layers.ceil(**kwargs)
+paddle.fluid.layers.ceil(**kwargs)

Ceil Activation Operator.

$out = ceil(x)$

@@ -4041,7 +4041,7 @@ Duplicable: False Optional: False

floor

-paddle.v2.fluid.layers.floor(**kwargs)
+paddle.fluid.layers.floor(**kwargs)

Floor Activation Operator.

$out = floor(x)$

@@ -4062,7 +4062,7 @@ Duplicable: False Optional: False

round

-paddle.v2.fluid.layers.round(**kwargs)
+paddle.fluid.layers.round(**kwargs)

Round Activation Operator.

$out = [x]$

@@ -4083,7 +4083,7 @@ Duplicable: False Optional: False

reciprocal

-paddle.v2.fluid.layers.reciprocal(**kwargs)
+paddle.fluid.layers.reciprocal(**kwargs)

Reciprocal Activation Operator.

$$out = \frac{1}{x}$$

@@ -4104,7 +4104,7 @@ Duplicable: False Optional: False

log

-paddle.v2.fluid.layers.log(**kwargs)
+paddle.fluid.layers.log(**kwargs)

Log Activation Operator.

$out = ln(x)$

Natural logarithm of x.

@@ -4126,7 +4126,7 @@ Duplicable: False Optional: False

square

-paddle.v2.fluid.layers.square(**kwargs)
+paddle.fluid.layers.square(**kwargs)

Square Activation Operator.

$out = x^2$

@@ -4147,7 +4147,7 @@ Duplicable: False Optional: False

softplus

-paddle.v2.fluid.layers.softplus(**kwargs)
+paddle.fluid.layers.softplus(**kwargs)

Softplus Activation Operator.

$out = ln(1 + e^{x})$

@@ -4168,7 +4168,7 @@ Duplicable: False Optional: False

softsign

-paddle.v2.fluid.layers.softsign(**kwargs)
+paddle.fluid.layers.softsign(**kwargs)

Softsign Activation Operator.

$$out = \frac{x}{1 + |x|}$$

@@ -4189,7 +4189,7 @@ Duplicable: False Optional: False

brelu

-paddle.v2.fluid.layers.brelu(**kwargs)
+paddle.fluid.layers.brelu(**kwargs)

BRelu Activation Operator.

$out = \min(\max(x, t_{min}), t_{max})$

@@ -4216,7 +4216,7 @@ Duplicable: False Optional: False

leaky_relu

-paddle.v2.fluid.layers.leaky_relu(**kwargs)
+paddle.fluid.layers.leaky_relu(**kwargs)

LeakyRelu Activation Operator.

$out = \max(x, \alpha * x)$

@@ -4242,7 +4242,7 @@ Duplicable: False Optional: False

soft_relu

-paddle.v2.fluid.layers.soft_relu(**kwargs)
+paddle.fluid.layers.soft_relu(**kwargs)

SoftRelu Activation Operator.

$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$

@@ -4268,7 +4268,7 @@ Duplicable: False Optional: False

elu

-paddle.v2.fluid.layers.elu(**kwargs)
+paddle.fluid.layers.elu(**kwargs)

ELU Activation Operator.

Applies the following element-wise computation on the input according to https://arxiv.org/abs/1511.07289.

@@ -4296,7 +4296,7 @@ Duplicable: False Optional: False

relu6

-paddle.v2.fluid.layers.relu6(**kwargs)
+paddle.fluid.layers.relu6(**kwargs)

Relu6 Activation Operator.

$out = min(max(0, x), 6)$

@@ -4322,7 +4322,7 @@ Duplicable: False Optional: False

pow

-paddle.v2.fluid.layers.pow(**kwargs)
+paddle.fluid.layers.pow(**kwargs)

Pow Activation Operator.

$out = x^{factor}$

@@ -4348,7 +4348,7 @@ Duplicable: False Optional: False

stanh

-paddle.v2.fluid.layers.stanh(**kwargs)
+paddle.fluid.layers.stanh(**kwargs)

STanh Activation Operator.

$$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$

@@ -4375,7 +4375,7 @@ Duplicable: False Optional: False

hard_shrink

-paddle.v2.fluid.layers.hard_shrink(**kwargs)
+paddle.fluid.layers.hard_shrink(**kwargs)

HardShrink Activation Operator.

$$ out = \begin{cases}

@@ -4408,7 +4408,7 @@ Duplicable: False Optional: False

thresholded_relu

-paddle.v2.fluid.layers.thresholded_relu(**kwargs)
+paddle.fluid.layers.thresholded_relu(**kwargs)

ThresholdedRelu Activation Operator.

$$ out = \begin{cases}

@@ -4440,7 +4440,7 @@ Duplicable: False Optional: False

hard_sigmoid

-paddle.v2.fluid.layers.hard_sigmoid(**kwargs)
+paddle.fluid.layers.hard_sigmoid(**kwargs)

HardSigmoid Activation Operator.

Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391), which is much faster than sigmoid.

@@ -4472,7 +4472,7 @@ Duplicable: False Optional: False

swish

-paddle.v2.fluid.layers.swish(**kwargs)
+paddle.fluid.layers.swish(**kwargs)

Swish Activation Operator.

$$out = \frac{x}{1 + e^{-\beta x}}$$

@@ -4501,7 +4501,7 @@ Duplicable: False Optional: False

create_tensor

-paddle.v2.fluid.layers.create_tensor(dtype, name=None, persistable=False)
+paddle.fluid.layers.create_tensor(dtype, name=None, persistable=False)
@@ -4509,7 +4509,7 @@ Duplicable: False Optional: False

create_parameter

-paddle.v2.fluid.layers.create_parameter(shape, dtype, name=None, attr=None, is_bias=False, default_initializer=None)
+paddle.fluid.layers.create_parameter(shape, dtype, name=None, attr=None, is_bias=False, default_initializer=None)

Create a parameter :param shape: shape of the parameter :type shape: list[int] @@ -4541,7 +4541,7 @@ Xavier() will be used.

create_global_var

-paddle.v2.fluid.layers.create_global_var(shape, value, dtype, persistable=False, force_cpu=False, name=None)
+paddle.fluid.layers.create_global_var(shape, value, dtype, persistable=False, force_cpu=False, name=None)

Create a global variable. such as global_step :param shape: shape of the variable :type shape: list[int] @@ -4570,7 +4570,7 @@ Xavier() will be used.

cast

-paddle.v2.fluid.layers.cast(x, dtype)
+paddle.fluid.layers.cast(x, dtype)

This function takes in the input with input_dtype and casts it to the output_dtype as the output.

@@ -4580,7 +4580,7 @@ and casts it to the output_dtype as the output.

concat

-paddle.v2.fluid.layers.concat(input, axis=0)
+paddle.fluid.layers.concat(input, axis=0)

Concat

This function concatenates the input along the axis mentioned and returns that as the output.

@@ -4610,7 +4610,7 @@ and returns that as the output.

sums

-paddle.v2.fluid.layers.sums(input, out=None)
+paddle.fluid.layers.sums(input, out=None)

This function performs the sum operation on the input and returns the result as the output.

@@ -4638,7 +4638,7 @@ that need to be summed up.

assign

-paddle.v2.fluid.layers.assign(input, output)
+paddle.fluid.layers.assign(input, output)

Assign

This function copies the input Variable to the output Variable.

@@ -4667,7 +4667,7 @@ that need to be summed up.

fill_constant_batch_size_like

-paddle.v2.fluid.layers.fill_constant_batch_size_like(input, shape, dtype, value, input_dim_idx=0, output_dim_idx=0)
+paddle.fluid.layers.fill_constant_batch_size_like(input, shape, dtype, value, input_dim_idx=0, output_dim_idx=0)

fill_constant_batch_size_like

This function creates a tensor of specified shape, dtype and batch size, and initializes this with a constant supplied in value. The batch size is @@ -4707,7 +4707,7 @@ obtained from the input tensor.

fill_constant

-paddle.v2.fluid.layers.fill_constant(shape, dtype, value, force_cpu=False, out=None)
+paddle.fluid.layers.fill_constant(shape, dtype, value, force_cpu=False, out=None)

fill_constant

This function creates a tensor with specified shape and dtype, and initializes it with a constant specifed by value.

@@ -4744,7 +4744,7 @@ initializes it with a constant specifed by value.

ones

-paddle.v2.fluid.layers.ones(shape, dtype, force_cpu=False)
+paddle.fluid.layers.ones(shape, dtype, force_cpu=False)

ones

This function creates a tensor of specified shape and dtype, and initializes this with 1.

@@ -4778,7 +4778,7 @@ initializes it with a constant specifed by value.

zeros

-paddle.v2.fluid.layers.zeros(shape, dtype, force_cpu=False)
+paddle.fluid.layers.zeros(shape, dtype, force_cpu=False)

zeros

This function creates a tensor of specified shape and dtype, and initializes this with 0.

diff --git a/develop/api_doc/v2/fluid/nets.html b/develop/api_doc/v2/fluid/nets.html index e0bf9248762..778526b656d 100644 --- a/develop/api_doc/v2/fluid/nets.html +++ b/develop/api_doc/v2/fluid/nets.html @@ -179,7 +179,7 @@

simple_img_conv_pool

-paddle.v2.fluid.nets.simple_img_conv_pool(input, num_filters, filter_size, pool_size, pool_stride, act, param_attr=None, pool_type='max', use_cudnn=True)
+paddle.fluid.nets.simple_img_conv_pool(input, num_filters, filter_size, pool_size, pool_stride, act, param_attr=None, pool_type='max', use_cudnn=True)
@@ -187,7 +187,7 @@

sequence_conv_pool

-paddle.v2.fluid.nets.sequence_conv_pool(input, num_filters, filter_size, param_attr=None, act='sigmoid', pool_type='max')
+paddle.fluid.nets.sequence_conv_pool(input, num_filters, filter_size, param_attr=None, act='sigmoid', pool_type='max')
@@ -195,7 +195,7 @@

glu

-paddle.v2.fluid.nets.glu(input, dim=-1)
+paddle.fluid.nets.glu(input, dim=-1)

The gated linear unit composed by split, sigmoid activation and elementwise multiplication. Specifically, Split the input into two equal sized parts \(a\) and \(b\) along the given dimension and then compute as @@ -236,7 +236,7 @@ dimension to split along is \(rank(input) + dim\).scaled_dot_product_attention

-paddle.v2.fluid.nets.scaled_dot_product_attention(queries, keys, values, num_heads=1, dropout_rate=0.0)
+paddle.fluid.nets.scaled_dot_product_attention(queries, keys, values, num_heads=1, dropout_rate=0.0)

The dot-product attention.

Attention mechanism can be seen as mapping a query and a set of key-value pairs to an output. The output is computed as a weighted sum of the values, diff --git a/develop/api_doc/v2/fluid/optimizer.html b/develop/api_doc/v2/fluid/optimizer.html index dfc78115477..c38982b1025 100644 --- a/develop/api_doc/v2/fluid/optimizer.html +++ b/develop/api_doc/v2/fluid/optimizer.html @@ -179,7 +179,7 @@

SGD

-paddle.v2.fluid.optimizer.SGD
+paddle.fluid.optimizer.SGD

alias of SGDOptimizer

@@ -188,7 +188,7 @@

Momentum

-paddle.v2.fluid.optimizer.Momentum
+paddle.fluid.optimizer.Momentum

alias of MomentumOptimizer

@@ -197,7 +197,7 @@

Adagrad

-paddle.v2.fluid.optimizer.Adagrad
+paddle.fluid.optimizer.Adagrad

alias of AdagradOptimizer

@@ -206,7 +206,7 @@

Adam

-paddle.v2.fluid.optimizer.Adam
+paddle.fluid.optimizer.Adam

alias of AdamOptimizer

@@ -215,7 +215,7 @@

Adamax

-paddle.v2.fluid.optimizer.Adamax
+paddle.fluid.optimizer.Adamax

alias of AdamaxOptimizer

@@ -224,7 +224,7 @@

DecayedAdagrad

-paddle.v2.fluid.optimizer.DecayedAdagrad
+paddle.fluid.optimizer.DecayedAdagrad

alias of DecayedAdagradOptimizer

diff --git a/develop/api_doc/v2/fluid/param_attr.html b/develop/api_doc/v2/fluid/param_attr.html index 5af6e34b298..bcfd766b03e 100644 --- a/develop/api_doc/v2/fluid/param_attr.html +++ b/develop/api_doc/v2/fluid/param_attr.html @@ -179,7 +179,7 @@

ParamAttr

-class paddle.v2.fluid.param_attr.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None)
+class paddle.fluid.param_attr.ParamAttr(name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, gradient_clip=None)
@@ -187,7 +187,7 @@

WeightNormParamAttr

-class paddle.v2.fluid.param_attr.WeightNormParamAttr(dim=None, **kwargs)
+class paddle.fluid.param_attr.WeightNormParamAttr(dim=None, **kwargs)

Used for weight normalization. Any field in ParamAttr can also be set here. Besides, an extra field dim can be set to indicate the dimension except which to normalize.

diff --git a/develop/api_doc/v2/fluid/profiler.html b/develop/api_doc/v2/fluid/profiler.html index 8c41777ff67..3c12bf0bfec 100644 --- a/develop/api_doc/v2/fluid/profiler.html +++ b/develop/api_doc/v2/fluid/profiler.html @@ -179,7 +179,7 @@

cuda_profiler

-paddle.v2.fluid.profiler.cuda_profiler(*args, **kwds)
+paddle.fluid.profiler.cuda_profiler(*args, **kwds)

The CUDA profiler. This fuctions is used to profile CUDA program by CUDA runtime application programming interface. The profiling result will be written into @@ -211,7 +211,7 @@ to “Compute Command Line Profiler User Guide”.

reset_profiler

-paddle.v2.fluid.profiler.reset_profiler()
+paddle.fluid.profiler.reset_profiler()

The profiler clear interface. reset_profiler will clear the previous time record.

@@ -221,7 +221,7 @@ reset_profiler will clear the previous time record.

profiler

-paddle.v2.fluid.profiler.profiler(*args, **kwds)
+paddle.fluid.profiler.profiler(*args, **kwds)

The profiler interface. Different from cuda_profiler, this profiler can be used to profile both CPU and GPU program. By defalut, it records the CPU and GPU operator kernels, diff --git a/develop/api_doc/v2/fluid/regularizer.html b/develop/api_doc/v2/fluid/regularizer.html index e9289f21aae..6e629f048fb 100644 --- a/develop/api_doc/v2/fluid/regularizer.html +++ b/develop/api_doc/v2/fluid/regularizer.html @@ -179,7 +179,7 @@

append_regularization_ops

-paddle.v2.fluid.regularizer.append_regularization_ops(parameters_and_grads, regularization=None)
+paddle.fluid.regularizer.append_regularization_ops(parameters_and_grads, regularization=None)

Create and add backward regularization Operators

Creates and adds backward regularization operators in the BlockDesc. This will add gradients of the regularizer function to the gradients @@ -212,7 +212,7 @@ set. It will be applied with regularizer.

L1Decay

-paddle.v2.fluid.regularizer.L1Decay
+paddle.fluid.regularizer.L1Decay

alias of L1DecayRegularizer

@@ -221,7 +221,7 @@ set. It will be applied with regularizer.

L2Decay

-paddle.v2.fluid.regularizer.L2Decay
+paddle.fluid.regularizer.L2Decay

alias of L2DecayRegularizer

-- GitLab