Commit a50230ad authored by T typhoonzero

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_raw_var_type

......@@ -22,7 +22,8 @@ COPY ./paddle/scripts/docker/root/ /root/
RUN apt-get update && \
apt-get install -y \
git python-pip python-dev openssh-server bison libnccl-dev \
git python-pip python-dev openssh-server bison \
libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
curl sed grep graphviz libjpeg-dev zlib1g-dev \
python-matplotlib gcc-4.8 g++-4.8 \
......
API
===
.. toctree::
:maxdepth: 1
Model Configuration <v2/model_configs.rst>
Data Access <v2/data.rst>
Training and Inference <v2/run_logic.rst>
v2/fluid.rst
......@@ -4,6 +4,7 @@ API
.. toctree::
:maxdepth: 1
overview.rst
v2/model_configs.rst
v2/data.rst
v2/run_logic.rst
......
V2 API Overview
================
The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1 (the original layer-based platform of PaddlePaddle).
It introduces high-level concepts such as `Layers <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/layer.html>`_ , `Optimizer <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/optimizer.html>`_ , `Evaluator <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/evaluators.html>`_ and `Data Reader <http://www.paddlepaddle.org/docs/develop/api/en/v2/data/data_reader.html>`_ to make model configuration more familiar to users.
A model is composed of the computation described by a group of `Layers`, with an `Evaluator` to define the error, an `Optimizer` to update the parameters, and a `Data Reader` to feed in the data.
We also provide the `interface for Training and Inference <http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html>`_ to help control the training and inference phase.
It has several easy-to-use methods:
- `paddle.train`
- `paddle.test`
- `paddle.infer`
To better expose the internal running details, different `events <http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html#event>`_ are made available to users, who can hook into them by writing callbacks.
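As a minimal sketch of that callback mechanism (assuming a `trainer` already built with `paddle.trainer.SGD` and a batched `reader`; both are placeholders here)::

    import paddle.v2 as paddle

    def event_handler(event):
        # The trainer invokes this on every event; filter by event type.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print("Pass %d, Batch %d, Cost %f" %
                      (event.pass_id, event.batch_id, event.cost))

    trainer.train(reader=reader, event_handler=event_handler, num_passes=10)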
......@@ -8,7 +8,7 @@ data_feeder
DataFeeder
----------
.. autoclass:: paddle.v2.fluid.data_feeder.DataFeeder
.. autoclass:: paddle.fluid.data_feeder.DataFeeder
:members:
:noindex:
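For reference, a hedged sketch of how the renamed class is typically constructed (the variables `x`, `y`, and the minibatch are illustrative)::

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')

    place = fluid.CPUPlace()
    # Converts a minibatch of (x, y) tuples into name-keyed LoDTensors.
    feeder = fluid.DataFeeder(feed_list=[x, y], place=place)
    # feed_dict = feeder.feed(minibatch)  # pass to Executor.run(feed=...)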
......@@ -8,14 +8,14 @@ evaluator
Accuracy
--------
.. autoclass:: paddle.v2.fluid.evaluator.Accuracy
.. autoclass:: paddle.fluid.evaluator.Accuracy
:members:
:noindex:
ChunkEvaluator
--------------
.. autoclass:: paddle.v2.fluid.evaluator.ChunkEvaluator
.. autoclass:: paddle.fluid.evaluator.ChunkEvaluator
:members:
:noindex:
......@@ -8,25 +8,25 @@ executor
Executor
--------
.. autoclass:: paddle.v2.fluid.executor.Executor
.. autoclass:: paddle.fluid.executor.Executor
:members:
:noindex:
global_scope
------------
.. autofunction:: paddle.v2.fluid.executor.global_scope
.. autofunction:: paddle.fluid.executor.global_scope
:noindex:
scope_guard
-----------
.. autofunction:: paddle.v2.fluid.executor.scope_guard
.. autofunction:: paddle.fluid.executor.scope_guard
:noindex:
switch_scope
------------
.. autofunction:: paddle.v2.fluid.executor.switch_scope
.. autofunction:: paddle.fluid.executor.switch_scope
:noindex:
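Under the new import path, these fit together roughly as follows (a sketch, not taken from this commit)::

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())

    # Run the startup program inside a fresh scope rather than the
    # process-wide scope returned by global_scope().
    scope = fluid.core.Scope()
    with fluid.scope_guard(scope):
        exe.run(fluid.default_startup_program())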
......@@ -17,7 +17,7 @@ import argparse
import sys
import types
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
def parse_arg():
......@@ -70,7 +70,7 @@ class DocGenerator(object):
def print_class(self, name):
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autoclass:: paddle.v2.fluid.{0}.{1}
self.stream.write('''.. autoclass:: paddle.fluid.{0}.{1}
:members:
:noindex:
......@@ -78,7 +78,7 @@ class DocGenerator(object):
def print_method(self, name):
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autofunction:: paddle.v2.fluid.{0}.{1}
self.stream.write('''.. autofunction:: paddle.fluid.{0}.{1}
:noindex:
'''.format(self.module_name, name))
......
......@@ -8,28 +8,28 @@ initializer
Constant
--------
.. autoclass:: paddle.v2.fluid.initializer.Constant
.. autoclass:: paddle.fluid.initializer.Constant
:members:
:noindex:
Uniform
-------
.. autoclass:: paddle.v2.fluid.initializer.Uniform
.. autoclass:: paddle.fluid.initializer.Uniform
:members:
:noindex:
Normal
------
.. autoclass:: paddle.v2.fluid.initializer.Normal
.. autoclass:: paddle.fluid.initializer.Normal
:members:
:noindex:
Xavier
------
.. autoclass:: paddle.v2.fluid.initializer.Xavier
.. autoclass:: paddle.fluid.initializer.Xavier
:members:
:noindex:
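As an illustrative sketch, an initializer is usually attached to a parameter through `ParamAttr` (the layer and its sizes are made up for the example)::

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
    fc = fluid.layers.fc(
        input=img,
        size=10,
        # Xavier for the weight, a constant zero for the bias.
        param_attr=fluid.ParamAttr(initializer=fluid.initializer.Xavier()),
        bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.0)))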
......@@ -8,54 +8,54 @@ io
save_vars
---------
.. autofunction:: paddle.v2.fluid.io.save_vars
.. autofunction:: paddle.fluid.io.save_vars
:noindex:
save_params
-----------
.. autofunction:: paddle.v2.fluid.io.save_params
.. autofunction:: paddle.fluid.io.save_params
:noindex:
save_persistables
-----------------
.. autofunction:: paddle.v2.fluid.io.save_persistables
.. autofunction:: paddle.fluid.io.save_persistables
:noindex:
load_vars
---------
.. autofunction:: paddle.v2.fluid.io.load_vars
.. autofunction:: paddle.fluid.io.load_vars
:noindex:
load_params
-----------
.. autofunction:: paddle.v2.fluid.io.load_params
.. autofunction:: paddle.fluid.io.load_params
:noindex:
load_persistables
-----------------
.. autofunction:: paddle.v2.fluid.io.load_persistables
.. autofunction:: paddle.fluid.io.load_persistables
:noindex:
save_inference_model
--------------------
.. autofunction:: paddle.v2.fluid.io.save_inference_model
.. autofunction:: paddle.fluid.io.save_inference_model
:noindex:
load_inference_model
--------------------
.. autofunction:: paddle.v2.fluid.io.load_inference_model
.. autofunction:: paddle.fluid.io.load_inference_model
:noindex:
get_inference_program
---------------------
.. autofunction:: paddle.v2.fluid.io.get_inference_program
.. autofunction:: paddle.fluid.io.get_inference_program
:noindex:
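A hedged end-to-end sketch of the two inference-model helpers (directory and variable names are illustrative)::

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    prediction = fluid.layers.fc(input=x, size=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Prune the program to what `prediction` needs, then save it with params.
    fluid.io.save_inference_model(
        dirname='model_dir',
        feeded_var_names=['x'],
        target_vars=[prediction],
        executor=exe)

    # Later: restore the pruned program plus the feed/fetch bookkeeping.
    [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
        dirname='model_dir', executor=exe)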
......@@ -11,167 +11,167 @@ control_flow
split_lod_tensor
----------------
.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor
.. autofunction:: paddle.fluid.layers.split_lod_tensor
:noindex:
merge_lod_tensor
----------------
.. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor
.. autofunction:: paddle.fluid.layers.merge_lod_tensor
:noindex:
BlockGuard
----------
.. autoclass:: paddle.v2.fluid.layers.BlockGuard
.. autoclass:: paddle.fluid.layers.BlockGuard
:members:
:noindex:
BlockGuardWithCompletion
------------------------
.. autoclass:: paddle.v2.fluid.layers.BlockGuardWithCompletion
.. autoclass:: paddle.fluid.layers.BlockGuardWithCompletion
:members:
:noindex:
StaticRNNMemoryLink
-------------------
.. autoclass:: paddle.v2.fluid.layers.StaticRNNMemoryLink
.. autoclass:: paddle.fluid.layers.StaticRNNMemoryLink
:members:
:noindex:
WhileGuard
----------
.. autoclass:: paddle.v2.fluid.layers.WhileGuard
.. autoclass:: paddle.fluid.layers.WhileGuard
:members:
:noindex:
While
-----
.. autoclass:: paddle.v2.fluid.layers.While
.. autoclass:: paddle.fluid.layers.While
:members:
:noindex:
lod_rank_table
--------------
.. autofunction:: paddle.v2.fluid.layers.lod_rank_table
.. autofunction:: paddle.fluid.layers.lod_rank_table
:noindex:
max_sequence_len
----------------
.. autofunction:: paddle.v2.fluid.layers.max_sequence_len
.. autofunction:: paddle.fluid.layers.max_sequence_len
:noindex:
topk
----
.. autofunction:: paddle.v2.fluid.layers.topk
.. autofunction:: paddle.fluid.layers.topk
:noindex:
lod_tensor_to_array
-------------------
.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array
.. autofunction:: paddle.fluid.layers.lod_tensor_to_array
:noindex:
array_to_lod_tensor
-------------------
.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor
.. autofunction:: paddle.fluid.layers.array_to_lod_tensor
:noindex:
increment
---------
.. autofunction:: paddle.v2.fluid.layers.increment
.. autofunction:: paddle.fluid.layers.increment
:noindex:
array_write
-----------
.. autofunction:: paddle.v2.fluid.layers.array_write
.. autofunction:: paddle.fluid.layers.array_write
:noindex:
create_array
------------
.. autofunction:: paddle.v2.fluid.layers.create_array
.. autofunction:: paddle.fluid.layers.create_array
:noindex:
less_than
---------
.. autofunction:: paddle.v2.fluid.layers.less_than
.. autofunction:: paddle.fluid.layers.less_than
:noindex:
array_read
----------
.. autofunction:: paddle.v2.fluid.layers.array_read
.. autofunction:: paddle.fluid.layers.array_read
:noindex:
shrink_memory
-------------
.. autofunction:: paddle.v2.fluid.layers.shrink_memory
.. autofunction:: paddle.fluid.layers.shrink_memory
:noindex:
array_length
------------
.. autofunction:: paddle.v2.fluid.layers.array_length
.. autofunction:: paddle.fluid.layers.array_length
:noindex:
IfElse
------
.. autoclass:: paddle.v2.fluid.layers.IfElse
.. autoclass:: paddle.fluid.layers.IfElse
:members:
:noindex:
DynamicRNN
----------
.. autoclass:: paddle.v2.fluid.layers.DynamicRNN
.. autoclass:: paddle.fluid.layers.DynamicRNN
:members:
:noindex:
ConditionalBlock
----------------
.. autoclass:: paddle.v2.fluid.layers.ConditionalBlock
.. autoclass:: paddle.fluid.layers.ConditionalBlock
:members:
:noindex:
StaticRNN
---------
.. autoclass:: paddle.v2.fluid.layers.StaticRNN
.. autoclass:: paddle.fluid.layers.StaticRNN
:members:
:noindex:
reorder_lod_tensor_by_rank
--------------------------
.. autofunction:: paddle.v2.fluid.layers.reorder_lod_tensor_by_rank
.. autofunction:: paddle.fluid.layers.reorder_lod_tensor_by_rank
:noindex:
ParallelDo
----------
.. autoclass:: paddle.v2.fluid.layers.ParallelDo
.. autoclass:: paddle.fluid.layers.ParallelDo
:members:
:noindex:
Print
-----
.. autofunction:: paddle.v2.fluid.layers.Print
.. autofunction:: paddle.fluid.layers.Print
:noindex:
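To make the control-flow primitives above concrete, here is a small hedged sketch of a counting loop built from `fill_constant`, `less_than`, `increment`, and `While`::

    import paddle.fluid as fluid

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    cond = fluid.layers.less_than(x=i, y=limit)

    loop = fluid.layers.While(cond=cond)
    with loop.block():
        i = fluid.layers.increment(x=i, value=1, in_place=True)
        # Refresh the condition in place so the loop can terminate.
        fluid.layers.less_than(x=i, y=limit, cond=cond)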
device
......@@ -180,7 +180,7 @@ device
get_places
----------
.. autofunction:: paddle.v2.fluid.layers.get_places
.. autofunction:: paddle.fluid.layers.get_places
:noindex:
io
......@@ -189,27 +189,27 @@ io
data
----
.. autofunction:: paddle.v2.fluid.layers.data
.. autofunction:: paddle.fluid.layers.data
:noindex:
BlockGuardServ
--------------
.. autoclass:: paddle.v2.fluid.layers.BlockGuardServ
.. autoclass:: paddle.fluid.layers.BlockGuardServ
:members:
:noindex:
ListenAndServ
-------------
.. autoclass:: paddle.v2.fluid.layers.ListenAndServ
.. autoclass:: paddle.fluid.layers.ListenAndServ
:members:
:noindex:
Send
----
.. autofunction:: paddle.v2.fluid.layers.Send
.. autofunction:: paddle.fluid.layers.Send
:noindex:
nn
......@@ -218,259 +218,259 @@ nn
fc
--
.. autofunction:: paddle.v2.fluid.layers.fc
.. autofunction:: paddle.fluid.layers.fc
:noindex:
embedding
---------
.. autofunction:: paddle.v2.fluid.layers.embedding
.. autofunction:: paddle.fluid.layers.embedding
:noindex:
dynamic_lstm
------------
.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm
.. autofunction:: paddle.fluid.layers.dynamic_lstm
:noindex:
dynamic_lstmp
-------------
.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp
.. autofunction:: paddle.fluid.layers.dynamic_lstmp
:noindex:
dynamic_gru
-----------
.. autofunction:: paddle.v2.fluid.layers.dynamic_gru
.. autofunction:: paddle.fluid.layers.dynamic_gru
:noindex:
gru_unit
--------
.. autofunction:: paddle.v2.fluid.layers.gru_unit
.. autofunction:: paddle.fluid.layers.gru_unit
:noindex:
linear_chain_crf
----------------
.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf
.. autofunction:: paddle.fluid.layers.linear_chain_crf
:noindex:
crf_decoding
------------
.. autofunction:: paddle.v2.fluid.layers.crf_decoding
.. autofunction:: paddle.fluid.layers.crf_decoding
:noindex:
cos_sim
-------
.. autofunction:: paddle.v2.fluid.layers.cos_sim
.. autofunction:: paddle.fluid.layers.cos_sim
:noindex:
cross_entropy
-------------
.. autofunction:: paddle.v2.fluid.layers.cross_entropy
.. autofunction:: paddle.fluid.layers.cross_entropy
:noindex:
square_error_cost
-----------------
.. autofunction:: paddle.v2.fluid.layers.square_error_cost
.. autofunction:: paddle.fluid.layers.square_error_cost
:noindex:
accuracy
--------
.. autofunction:: paddle.v2.fluid.layers.accuracy
.. autofunction:: paddle.fluid.layers.accuracy
:noindex:
chunk_eval
----------
.. autofunction:: paddle.v2.fluid.layers.chunk_eval
.. autofunction:: paddle.fluid.layers.chunk_eval
:noindex:
sequence_conv
-------------
.. autofunction:: paddle.v2.fluid.layers.sequence_conv
.. autofunction:: paddle.fluid.layers.sequence_conv
:noindex:
conv2d
------
.. autofunction:: paddle.v2.fluid.layers.conv2d
.. autofunction:: paddle.fluid.layers.conv2d
:noindex:
sequence_pool
-------------
.. autofunction:: paddle.v2.fluid.layers.sequence_pool
.. autofunction:: paddle.fluid.layers.sequence_pool
:noindex:
pool2d
------
.. autofunction:: paddle.v2.fluid.layers.pool2d
.. autofunction:: paddle.fluid.layers.pool2d
:noindex:
batch_norm
----------
.. autofunction:: paddle.v2.fluid.layers.batch_norm
.. autofunction:: paddle.fluid.layers.batch_norm
:noindex:
layer_norm
----------
.. autofunction:: paddle.v2.fluid.layers.layer_norm
.. autofunction:: paddle.fluid.layers.layer_norm
:noindex:
beam_search_decode
------------------
.. autofunction:: paddle.v2.fluid.layers.beam_search_decode
.. autofunction:: paddle.fluid.layers.beam_search_decode
:noindex:
conv2d_transpose
----------------
.. autofunction:: paddle.v2.fluid.layers.conv2d_transpose
.. autofunction:: paddle.fluid.layers.conv2d_transpose
:noindex:
sequence_expand
---------------
.. autofunction:: paddle.v2.fluid.layers.sequence_expand
.. autofunction:: paddle.fluid.layers.sequence_expand
:noindex:
lstm_unit
---------
.. autofunction:: paddle.v2.fluid.layers.lstm_unit
.. autofunction:: paddle.fluid.layers.lstm_unit
:noindex:
reduce_sum
----------
.. autofunction:: paddle.v2.fluid.layers.reduce_sum
.. autofunction:: paddle.fluid.layers.reduce_sum
:noindex:
reduce_mean
-----------
.. autofunction:: paddle.v2.fluid.layers.reduce_mean
.. autofunction:: paddle.fluid.layers.reduce_mean
:noindex:
reduce_max
----------
.. autofunction:: paddle.v2.fluid.layers.reduce_max
.. autofunction:: paddle.fluid.layers.reduce_max
:noindex:
reduce_min
----------
.. autofunction:: paddle.v2.fluid.layers.reduce_min
.. autofunction:: paddle.fluid.layers.reduce_min
:noindex:
sequence_first_step
-------------------
.. autofunction:: paddle.v2.fluid.layers.sequence_first_step
.. autofunction:: paddle.fluid.layers.sequence_first_step
:noindex:
sequence_last_step
------------------
.. autofunction:: paddle.v2.fluid.layers.sequence_last_step
.. autofunction:: paddle.fluid.layers.sequence_last_step
:noindex:
dropout
-------
.. autofunction:: paddle.v2.fluid.layers.dropout
.. autofunction:: paddle.fluid.layers.dropout
:noindex:
split
-----
.. autofunction:: paddle.v2.fluid.layers.split
.. autofunction:: paddle.fluid.layers.split
:noindex:
ctc_greedy_decoder
------------------
.. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder
.. autofunction:: paddle.fluid.layers.ctc_greedy_decoder
:noindex:
edit_distance
-------------
.. autofunction:: paddle.v2.fluid.layers.edit_distance
.. autofunction:: paddle.fluid.layers.edit_distance
:noindex:
l2_normalize
------------
.. autofunction:: paddle.v2.fluid.layers.l2_normalize
.. autofunction:: paddle.fluid.layers.l2_normalize
:noindex:
matmul
------
.. autofunction:: paddle.v2.fluid.layers.matmul
.. autofunction:: paddle.fluid.layers.matmul
:noindex:
warpctc
-------
.. autofunction:: paddle.v2.fluid.layers.warpctc
.. autofunction:: paddle.fluid.layers.warpctc
:noindex:
sequence_reshape
----------------
.. autofunction:: paddle.v2.fluid.layers.sequence_reshape
.. autofunction:: paddle.fluid.layers.sequence_reshape
:noindex:
transpose
---------
.. autofunction:: paddle.v2.fluid.layers.transpose
.. autofunction:: paddle.fluid.layers.transpose
:noindex:
im2sequence
-----------
.. autofunction:: paddle.v2.fluid.layers.im2sequence
.. autofunction:: paddle.fluid.layers.im2sequence
:noindex:
nce
---
.. autofunction:: paddle.v2.fluid.layers.nce
.. autofunction:: paddle.fluid.layers.nce
:noindex:
beam_search
-----------
.. autofunction:: paddle.v2.fluid.layers.beam_search
.. autofunction:: paddle.fluid.layers.beam_search
:noindex:
row_conv
--------
.. autofunction:: paddle.v2.fluid.layers.row_conv
.. autofunction:: paddle.fluid.layers.row_conv
:noindex:
multiplex
---------
.. autofunction:: paddle.v2.fluid.layers.multiplex
.. autofunction:: paddle.fluid.layers.multiplex
:noindex:
ops
......@@ -479,259 +479,259 @@ ops
mean
----
.. autofunction:: paddle.v2.fluid.layers.mean
.. autofunction:: paddle.fluid.layers.mean
:noindex:
mul
---
.. autofunction:: paddle.v2.fluid.layers.mul
.. autofunction:: paddle.fluid.layers.mul
:noindex:
reshape
-------
.. autofunction:: paddle.v2.fluid.layers.reshape
.. autofunction:: paddle.fluid.layers.reshape
:noindex:
scale
-----
.. autofunction:: paddle.v2.fluid.layers.scale
.. autofunction:: paddle.fluid.layers.scale
:noindex:
sigmoid_cross_entropy_with_logits
---------------------------------
.. autofunction:: paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits
.. autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
:noindex:
elementwise_add
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_add
.. autofunction:: paddle.fluid.layers.elementwise_add
:noindex:
elementwise_div
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_div
.. autofunction:: paddle.fluid.layers.elementwise_div
:noindex:
elementwise_sub
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_sub
.. autofunction:: paddle.fluid.layers.elementwise_sub
:noindex:
elementwise_mul
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_mul
.. autofunction:: paddle.fluid.layers.elementwise_mul
:noindex:
elementwise_max
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_max
.. autofunction:: paddle.fluid.layers.elementwise_max
:noindex:
elementwise_min
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_min
.. autofunction:: paddle.fluid.layers.elementwise_min
:noindex:
elementwise_pow
---------------
.. autofunction:: paddle.v2.fluid.layers.elementwise_pow
.. autofunction:: paddle.fluid.layers.elementwise_pow
:noindex:
clip
----
.. autofunction:: paddle.v2.fluid.layers.clip
.. autofunction:: paddle.fluid.layers.clip
:noindex:
clip_by_norm
------------
.. autofunction:: paddle.v2.fluid.layers.clip_by_norm
.. autofunction:: paddle.fluid.layers.clip_by_norm
:noindex:
sequence_softmax
----------------
.. autofunction:: paddle.v2.fluid.layers.sequence_softmax
.. autofunction:: paddle.fluid.layers.sequence_softmax
:noindex:
sigmoid
-------
.. autofunction:: paddle.v2.fluid.layers.sigmoid
.. autofunction:: paddle.fluid.layers.sigmoid
:noindex:
logsigmoid
----------
.. autofunction:: paddle.v2.fluid.layers.logsigmoid
.. autofunction:: paddle.fluid.layers.logsigmoid
:noindex:
exp
---
.. autofunction:: paddle.v2.fluid.layers.exp
.. autofunction:: paddle.fluid.layers.exp
:noindex:
relu
----
.. autofunction:: paddle.v2.fluid.layers.relu
.. autofunction:: paddle.fluid.layers.relu
:noindex:
tanh
----
.. autofunction:: paddle.v2.fluid.layers.tanh
.. autofunction:: paddle.fluid.layers.tanh
:noindex:
tanh_shrink
-----------
.. autofunction:: paddle.v2.fluid.layers.tanh_shrink
.. autofunction:: paddle.fluid.layers.tanh_shrink
:noindex:
softshrink
----------
.. autofunction:: paddle.v2.fluid.layers.softshrink
.. autofunction:: paddle.fluid.layers.softshrink
:noindex:
sqrt
----
.. autofunction:: paddle.v2.fluid.layers.sqrt
.. autofunction:: paddle.fluid.layers.sqrt
:noindex:
abs
---
.. autofunction:: paddle.v2.fluid.layers.abs
.. autofunction:: paddle.fluid.layers.abs
:noindex:
ceil
----
.. autofunction:: paddle.v2.fluid.layers.ceil
.. autofunction:: paddle.fluid.layers.ceil
:noindex:
floor
-----
.. autofunction:: paddle.v2.fluid.layers.floor
.. autofunction:: paddle.fluid.layers.floor
:noindex:
round
-----
.. autofunction:: paddle.v2.fluid.layers.round
.. autofunction:: paddle.fluid.layers.round
:noindex:
reciprocal
----------
.. autofunction:: paddle.v2.fluid.layers.reciprocal
.. autofunction:: paddle.fluid.layers.reciprocal
:noindex:
log
---
.. autofunction:: paddle.v2.fluid.layers.log
.. autofunction:: paddle.fluid.layers.log
:noindex:
square
------
.. autofunction:: paddle.v2.fluid.layers.square
.. autofunction:: paddle.fluid.layers.square
:noindex:
softplus
--------
.. autofunction:: paddle.v2.fluid.layers.softplus
.. autofunction:: paddle.fluid.layers.softplus
:noindex:
softsign
--------
.. autofunction:: paddle.v2.fluid.layers.softsign
.. autofunction:: paddle.fluid.layers.softsign
:noindex:
brelu
-----
.. autofunction:: paddle.v2.fluid.layers.brelu
.. autofunction:: paddle.fluid.layers.brelu
:noindex:
leaky_relu
----------
.. autofunction:: paddle.v2.fluid.layers.leaky_relu
.. autofunction:: paddle.fluid.layers.leaky_relu
:noindex:
soft_relu
---------
.. autofunction:: paddle.v2.fluid.layers.soft_relu
.. autofunction:: paddle.fluid.layers.soft_relu
:noindex:
elu
---
.. autofunction:: paddle.v2.fluid.layers.elu
.. autofunction:: paddle.fluid.layers.elu
:noindex:
relu6
-----
.. autofunction:: paddle.v2.fluid.layers.relu6
.. autofunction:: paddle.fluid.layers.relu6
:noindex:
pow
---
.. autofunction:: paddle.v2.fluid.layers.pow
.. autofunction:: paddle.fluid.layers.pow
:noindex:
stanh
-----
.. autofunction:: paddle.v2.fluid.layers.stanh
.. autofunction:: paddle.fluid.layers.stanh
:noindex:
hard_shrink
-----------
.. autofunction:: paddle.v2.fluid.layers.hard_shrink
.. autofunction:: paddle.fluid.layers.hard_shrink
:noindex:
thresholded_relu
----------------
.. autofunction:: paddle.v2.fluid.layers.thresholded_relu
.. autofunction:: paddle.fluid.layers.thresholded_relu
:noindex:
hard_sigmoid
------------
.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid
.. autofunction:: paddle.fluid.layers.hard_sigmoid
:noindex:
swish
-----
.. autofunction:: paddle.v2.fluid.layers.swish
.. autofunction:: paddle.fluid.layers.swish
:noindex:
tensor
......@@ -740,66 +740,66 @@ tensor
create_tensor
-------------
.. autofunction:: paddle.v2.fluid.layers.create_tensor
.. autofunction:: paddle.fluid.layers.create_tensor
:noindex:
create_parameter
----------------
.. autofunction:: paddle.v2.fluid.layers.create_parameter
.. autofunction:: paddle.fluid.layers.create_parameter
:noindex:
create_global_var
-----------------
.. autofunction:: paddle.v2.fluid.layers.create_global_var
.. autofunction:: paddle.fluid.layers.create_global_var
:noindex:
cast
----
.. autofunction:: paddle.v2.fluid.layers.cast
.. autofunction:: paddle.fluid.layers.cast
:noindex:
concat
------
.. autofunction:: paddle.v2.fluid.layers.concat
.. autofunction:: paddle.fluid.layers.concat
:noindex:
sums
----
.. autofunction:: paddle.v2.fluid.layers.sums
.. autofunction:: paddle.fluid.layers.sums
:noindex:
assign
------
.. autofunction:: paddle.v2.fluid.layers.assign
.. autofunction:: paddle.fluid.layers.assign
:noindex:
fill_constant_batch_size_like
-----------------------------
.. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like
.. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like
:noindex:
fill_constant
-------------
.. autofunction:: paddle.v2.fluid.layers.fill_constant
.. autofunction:: paddle.fluid.layers.fill_constant
:noindex:
ones
----
.. autofunction:: paddle.v2.fluid.layers.ones
.. autofunction:: paddle.fluid.layers.ones
:noindex:
zeros
-----
.. autofunction:: paddle.v2.fluid.layers.zeros
.. autofunction:: paddle.fluid.layers.zeros
:noindex:
......@@ -8,24 +8,24 @@ nets
simple_img_conv_pool
--------------------
.. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool
.. autofunction:: paddle.fluid.nets.simple_img_conv_pool
:noindex:
sequence_conv_pool
------------------
.. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool
.. autofunction:: paddle.fluid.nets.sequence_conv_pool
:noindex:
glu
---
.. autofunction:: paddle.v2.fluid.nets.glu
.. autofunction:: paddle.fluid.nets.glu
:noindex:
scaled_dot_product_attention
----------------------------
.. autofunction:: paddle.v2.fluid.nets.scaled_dot_product_attention
.. autofunction:: paddle.fluid.nets.scaled_dot_product_attention
:noindex:
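For instance, `simple_img_conv_pool` on MNIST-shaped input might be called like this (a sketch; all sizes are illustrative)::

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    # One conv layer followed by one pooling layer, fused into a single call.
    conv_pool = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")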
......@@ -8,42 +8,42 @@ optimizer
SGD
---
.. autoclass:: paddle.v2.fluid.optimizer.SGD
.. autoclass:: paddle.fluid.optimizer.SGD
:members:
:noindex:
Momentum
--------
.. autoclass:: paddle.v2.fluid.optimizer.Momentum
.. autoclass:: paddle.fluid.optimizer.Momentum
:members:
:noindex:
Adagrad
-------
.. autoclass:: paddle.v2.fluid.optimizer.Adagrad
.. autoclass:: paddle.fluid.optimizer.Adagrad
:members:
:noindex:
Adam
----
.. autoclass:: paddle.v2.fluid.optimizer.Adam
.. autoclass:: paddle.fluid.optimizer.Adam
:members:
:noindex:
Adamax
------
.. autoclass:: paddle.v2.fluid.optimizer.Adamax
.. autoclass:: paddle.fluid.optimizer.Adamax
:members:
:noindex:
DecayedAdagrad
--------------
.. autoclass:: paddle.v2.fluid.optimizer.DecayedAdagrad
.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad
:members:
:noindex:
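A minimal hedged sketch of wiring one of these optimizers to a loss (the regression network is illustrative)::

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    cost = fluid.layers.square_error_cost(input=pred, label=y)
    avg_cost = fluid.layers.mean(x=cost)

    # minimize() appends the backward pass and the SGD update ops.
    sgd = fluid.optimizer.SGD(learning_rate=0.001)
    sgd.minimize(avg_cost)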
......@@ -8,14 +8,14 @@ param_attr
ParamAttr
---------
.. autoclass:: paddle.v2.fluid.param_attr.ParamAttr
.. autoclass:: paddle.fluid.param_attr.ParamAttr
:members:
:noindex:
WeightNormParamAttr
-------------------
.. autoclass:: paddle.v2.fluid.param_attr.WeightNormParamAttr
.. autoclass:: paddle.fluid.param_attr.WeightNormParamAttr
:members:
:noindex:
......@@ -8,18 +8,18 @@ profiler
cuda_profiler
-------------
.. autofunction:: paddle.v2.fluid.profiler.cuda_profiler
.. autofunction:: paddle.fluid.profiler.cuda_profiler
:noindex:
reset_profiler
--------------
.. autofunction:: paddle.v2.fluid.profiler.reset_profiler
.. autofunction:: paddle.fluid.profiler.reset_profiler
:noindex:
profiler
--------
.. autofunction:: paddle.v2.fluid.profiler.profiler
.. autofunction:: paddle.fluid.profiler.profiler
:noindex:
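A hedged usage sketch of the context-manager form (the state and sort key are illustrative, and exact arguments may differ by version)::

    import paddle.fluid.profiler as profiler

    # Profile everything inside the block, then print totals on exit.
    with profiler.profiler('CPU', 'total'):
        pass  # ... run a few Executor.run(...) iterations here ...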
......@@ -8,20 +8,20 @@ regularizer
append_regularization_ops
-------------------------
.. autofunction:: paddle.v2.fluid.regularizer.append_regularization_ops
.. autofunction:: paddle.fluid.regularizer.append_regularization_ops
:noindex:
L1Decay
-------
.. autoclass:: paddle.v2.fluid.regularizer.L1Decay
.. autoclass:: paddle.fluid.regularizer.L1Decay
:members:
:noindex:
L2Decay
-------
.. autoclass:: paddle.v2.fluid.regularizer.L2Decay
.. autoclass:: paddle.fluid.regularizer.L2Decay
:members:
:noindex:
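As a hedged sketch, a weight-decay term is usually attached per parameter through `ParamAttr` (layer and coefficient are illustrative)::

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[32], dtype='float32')
    fc = fluid.layers.fc(
        input=x,
        size=10,
        # L2 weight decay on this layer's weight parameter only.
        param_attr=fluid.ParamAttr(
            regularizer=fluid.regularizer.L2Decay(1e-4)))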
......@@ -18,6 +18,7 @@ import shlex
from recommonmark import parser, transform
import paddle
import paddle.v2
import paddle.fluid
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
......
......@@ -18,6 +18,7 @@ import shlex
from recommonmark import parser, transform
import paddle
import paddle.v2
import paddle.fluid
MarkdownParser = parser.CommonMarkParser
......
......@@ -68,9 +68,9 @@ py_proto_compile(framework_py_proto SRCS framework.proto)
add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(framework_py_proto framework_py_proto_init)
add_custom_command(TARGET framework_py_proto POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto
COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto/
COMMENT "Copy generated python proto into directory paddle/v2/fluid/proto."
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto
COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/
COMMENT "Copy generated python proto into directory paddle/fluid/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
cc_library(backward SRCS backward.cc DEPS net_op)
......
......@@ -4,7 +4,7 @@ function(inference_test TARGET_NAME)
set(multiValueArgs ARGS)
cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests)
set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/fluid/tests)
set(arg_list "")
if(inference_test_ARGS)
foreach(arg ${inference_test_ARGS})
......
......@@ -83,7 +83,7 @@ class CompareOp : public framework::OperatorWithKernel {
} // namespace operators
} // namespace paddle
#define REGISTER_LOGICAL_OP(op_type, _equation) \
#define REGISTER_COMPARE_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
......@@ -96,11 +96,17 @@ class CompareOp : public framework::OperatorWithKernel {
::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker);
REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
REGISTER_LOGICAL_OP(equal, "Out = X == Y");
REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
REGISTER_LOGICAL_OP(not_equal, "Out = X != Y");
REGISTER_LOGICAL_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor);
REGISTER_COMPARE_OP(less_than, "Out = X < Y");
REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
REGISTER_COMPARE_OP(less_equal, "Out = X <= Y");
REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
REGISTER_COMPARE_OP(greater_than, "Out = X > Y");
REGISTER_COMPARE_KERNEL(greater_than, CPU,
paddle::operators::GreaterThanFunctor);
REGISTER_COMPARE_OP(greater_equal, "Out = X >= Y");
REGISTER_COMPARE_KERNEL(greater_equal, CPU,
paddle::operators::GreaterEqualFunctor);
REGISTER_COMPARE_OP(equal, "Out = X == Y");
REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
REGISTER_COMPARE_OP(not_equal, "Out = X != Y");
REGISTER_COMPARE_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor);
......@@ -14,7 +14,11 @@ limitations under the License. */
#include "paddle/fluid/operators/compare_op.h"
REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
REGISTER_LOGICAL_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor);
REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
REGISTER_COMPARE_KERNEL(greater_than, CUDA,
paddle::operators::GreaterThanFunctor);
REGISTER_COMPARE_KERNEL(greater_equal, CUDA,
paddle::operators::GreaterEqualFunctor);
REGISTER_COMPARE_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
REGISTER_COMPARE_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor);
......@@ -34,6 +34,18 @@ struct LessEqualFunctor {
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
};
template <typename T>
struct GreaterThanFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
};
template <typename T>
struct GreaterEqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
};
template <typename T>
struct EqualFunctor {
using ELEM_TYPE = T;
......@@ -76,7 +88,7 @@ class CompareOpKernel
} // namespace operators
} // namespace paddle
#define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \
#define REGISTER_COMPARE_KERNEL(op_type, dev, functor) \
REGISTER_OP_##dev##_KERNEL( \
op_type, ::paddle::operators::CompareOpKernel< \
::paddle::platform::dev##DeviceContext, functor<int>>, \
......
......@@ -129,9 +129,6 @@ TEST(NCCL, all_reduce) {
} // namespace paddle
int main(int argc, char** argv) {
// FIXME(tonyyang-svail):
// Due to the driver issue on our CI, disable for now
return 0;
dev_count = paddle::platform::GetCUDADeviceCount();
if (dev_count <= 1) {
LOG(WARNING)
......
......@@ -49,11 +49,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);
namespace paddle {
namespace pybind {
static size_t UniqueIntegerGenerator(const std::string &prefix) {
static std::unordered_map<std::string, std::atomic<size_t>> generators;
return generators[prefix].fetch_add(1);
}
bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
return false;
......@@ -410,7 +405,6 @@ All parameter, weight, gradient are variables in Paddle.
(void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
Executor::Run);
m.def("unique_integer", UniqueIntegerGenerator);
m.def("init_gflags", framework::InitGflags);
m.def("init_glog", framework::InitGLOG);
m.def("init_devices", &framework::InitDevices);
......
......@@ -171,7 +171,7 @@ EOF
EOF
if [[ ${WITH_GPU} == "ON" ]]; then
NCCL_DEPS="apt-get install -y libnccl-dev &&"
NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&"
else
NCCL_DEPS=""
fi
......
......@@ -3,12 +3,14 @@ file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py)
file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py)
file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py)
file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py)
file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/ *.py)
set(PY_FILES paddle/__init__.py
${TRAINER_PY_FILES}
${HELPERS_PY_FILES}
${UTILS_PY_FILES}
${V2_PY_FILES})
${V2_PY_FILES}
${FLUID_PY_FILES})
add_custom_target(copy_paddle_master)
......@@ -43,10 +45,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
${CMAKE_CURRENT_BINARY_DIR}/setup.py)
add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so
COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so
add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so
COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so
DEPENDS paddle_pybind)
add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so)
add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so)
add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp
......@@ -72,7 +74,7 @@ if (WITH_TESTING)
add_subdirectory(paddle/v2/tests)
add_subdirectory(paddle/v2/reader/tests)
add_subdirectory(paddle/v2/plot/tests)
add_subdirectory(paddle/v2/fluid/tests)
add_subdirectory(paddle/fluid/tests)
endif()
endif()
install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR}
......
......@@ -39,6 +39,7 @@ from concurrency import (Go, make_channel, channel_send, channel_recv,
import clip
from memory_optimization_transpiler import memory_optimize
import profiler
import unique_name
Tensor = LoDTensor
......@@ -63,6 +64,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
'DistributeTranspiler',
'memory_optimize',
'profiler',
'unique_name',
]
......
......@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.v2.fluid import framework as framework
from paddle.fluid import framework as framework
from . import core
import collections
import copy
import unique_name
__all__ = [
'append_backward',
......@@ -391,7 +392,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
for name in op_desc.output_arg_names():
if block.desc.find_var(name.encode("ascii")):
new_name = "%s_%s" % (name, core.unique_integer(name))
new_name = unique_name.generate(name)
op_desc.rename_output(name, new_name)
var_map[name] = new_name
......
......@@ -26,7 +26,7 @@ A `scoped_function` will take a `function` as input. That function will be
invoked in a new local scope.
"""
import paddle.v2.fluid.core
import paddle.fluid.core
import threading
__tl_scope__ = threading.local()
......@@ -44,13 +44,13 @@ __all__ = [
def get_cur_scope():
"""
Get current scope.
:rtype: paddle.v2.fluid.core.Scope
:rtype: paddle.fluid.core.Scope
"""
cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None)
if cur_scope_stack is None:
__tl_scope__.cur_scope = list()
if len(__tl_scope__.cur_scope) == 0:
__tl_scope__.cur_scope.append(paddle.v2.fluid.core.Scope())
__tl_scope__.cur_scope.append(paddle.fluid.core.Scope())
return __tl_scope__.cur_scope[-1]
......
......@@ -15,7 +15,8 @@
import numpy as np
import layers
from framework import Program, unique_name, Variable, program_guard
from framework import Program, Variable, program_guard
import unique_name
from layer_helper import LayerHelper
__all__ = [
......@@ -96,7 +97,7 @@ class Evaluator(object):
"""
state = self.helper.create_variable(
name="_".join([unique_name(self.helper.name), suffix]),
name="_".join([unique_name.generate(self.helper.name), suffix]),
persistable=True,
dtype=dtype,
shape=shape)
......
......@@ -20,6 +20,7 @@ import numpy as np
import proto.framework_pb2 as framework_pb2
from . import core
import unique_name
__all__ = [
'Block',
......@@ -47,20 +48,6 @@ def grad_var_name(var_name):
return var_name + GRAD_VAR_SUFFIX
def unique_name(prefix):
"""
Generate unique names with prefix
Args:
prefix(str): The prefix of return string
Returns(str): A unique string with the prefix
"""
uid = core.unique_integer(prefix) # unique during whole process.
return "_".join([prefix, str(uid)])
def convert_np_dtype_to_dtype_(np_dtype):
"""
Convert the data type in numpy to the data type in Paddle
......@@ -175,7 +162,7 @@ class Variable(object):
self.error_clip = error_clip
if name is None:
name = Variable._unique_var_name_()
name = unique_name.generate('_generated_var')
is_new_var = False
self.desc = self.block.desc.find_var(name)
......@@ -307,12 +294,6 @@ class Variable(object):
def type(self):
return self.desc.type()
@staticmethod
def _unique_var_name_():
prefix = "_generated_var"
uid = core.unique_integer(prefix) # unique during whole process.
return "_".join([prefix, str(uid)])
def set_error_clip(self, error_clip):
self.error_clip = error_clip
......
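After this refactor the helper lives in its own module, exported as `fluid.unique_name`; a brief sketch of the replacement call (the prefix is illustrative)::

    import paddle.fluid as fluid

    # Generated names stay unique across the whole process: fc_0, fc_1, ...
    name0 = fluid.unique_name.generate('fc')
    name1 = fluid.unique_name.generate('fc')
    assert name0 != name1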
......@@ -14,8 +14,8 @@
import os
from paddle.v2.fluid.evaluator import Evaluator
from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable
from paddle.fluid.evaluator import Evaluator
from paddle.fluid.framework import Program, Parameter, default_main_program, Variable
from . import core
__all__ = [
......
......@@ -15,9 +15,9 @@
import copy
import itertools
from framework import Variable, Parameter, default_main_program, default_startup_program, \
unique_name, dtype_is_floating
from paddle.v2.fluid.initializer import Constant, Xavier
from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
import unique_name
from paddle.fluid.initializer import Constant, Xavier
from param_attr import ParamAttr, WeightNormParamAttr
......@@ -27,7 +27,7 @@ class LayerHelper(object):
self.layer_type = layer_type
name = self.kwargs.get('name', None)
if name is None:
self.kwargs['name'] = unique_name(self.layer_type)
self.kwargs['name'] = unique_name.generate(self.layer_type)
@property
def name(self):
......@@ -117,17 +117,20 @@ class LayerHelper(object):
block=self.startup_program.global_block()):
if out is None:
out = block.create_var(
name=unique_name(".".join([self.name, 'weight_norm_norm'])),
name=unique_name.generate(".".join(
[self.name, 'weight_norm_norm'])),
dtype=dtype,
persistable=False)
abs_out = block.create_var(
name=unique_name(".".join([self.name, 'weight_norm_abs'])),
name=unique_name.generate(".".join(
[self.name, 'weight_norm_abs'])),
dtype=dtype,
persistable=False)
block.append_op(
type='abs', inputs={'X': x}, outputs={'Out': abs_out})
pow_out = block.create_var(
name=unique_name(".".join([self.name, 'weight_norm_pow'])),
name=unique_name.generate(".".join(
[self.name, 'weight_norm_pow'])),
dtype=dtype,
persistable=False)
block.append_op(
......@@ -136,7 +139,8 @@ class LayerHelper(object):
outputs={'Out': pow_out},
attrs={'factor': float(p)})
sum_out = block.create_var(
name=unique_name(".".join([self.name, 'weight_norm_sum'])),
name=unique_name.generate(".".join(
[self.name, 'weight_norm_sum'])),
dtype=dtype,
persistable=False)
block.append_op(
......@@ -161,7 +165,7 @@ class LayerHelper(object):
block=self.startup_program.global_block()):
if out is None:
out = block.create_var(
name=unique_name(".".join(
name=unique_name.generate(".".join(
[self.name, 'weight_norm_reshape'])),
dtype=dtype,
persistable=False)
......@@ -178,7 +182,7 @@ class LayerHelper(object):
block=self.startup_program.global_block()):
if out is None:
out = block.create_var(
name=unique_name(".".join(
name=unique_name.generate(".".join(
[self.name, 'weight_norm_transpose'])),
dtype=dtype,
persistable=False)
......@@ -196,7 +200,8 @@ class LayerHelper(object):
"""Computes the norm over all dimensions except dim"""
if out is None:
out = block.create_var(
name=unique_name(".".join([self.name, 'weight_norm_norm'])),
name=unique_name.generate(".".join(
[self.name, 'weight_norm_norm'])),
dtype=dtype,
persistable=False)
if dim is None:
......@@ -286,7 +291,7 @@ class LayerHelper(object):
assert isinstance(attr, ParamAttr)
suffix = 'b' if is_bias else 'w'
if attr.name is None:
attr.name = unique_name(".".join([self.name, suffix]))
attr.name = unique_name.generate(".".join([self.name, suffix]))
if default_initializer is None and attr.initializer is None:
if is_bias:
......@@ -316,7 +321,7 @@ class LayerHelper(object):
def create_tmp_variable(self, dtype, stop_gradient=False):
return self.main_program.current_block().create_var(
name=unique_name(".".join([self.name, 'tmp'])),
name=unique_name.generate(".".join([self.name, 'tmp'])),
dtype=dtype,
persistable=False,
stop_gradient=stop_gradient)
......
......@@ -428,7 +428,8 @@ class StaticRNN(object):
raise ValueError(
"if init is None, memory at least need shape and batch_ref")
parent_block = self.parent_block()
var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
var_name = unique_name.generate("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
......@@ -450,7 +451,7 @@ class StaticRNN(object):
return self.memory(init=boot_var)
else:
pre_mem = self.helper.create_variable(
name=unique_name("@".join([self.helper.name, "mem"])),
name=unique_name.generate("@".join([self.helper.name, "mem"])),
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
......@@ -652,7 +653,8 @@ class While(object):
parent_block.append_op(
type='while',
inputs={
'X': [parent_block.var(x_name) for x_name in x_name_list],
'X':
[parent_block.var_recursive(x_name) for x_name in x_name_list],
'Condition': [self.cond_var]
},
outputs={'Out': out_vars,
......@@ -709,7 +711,7 @@ def lod_rank_table(x, level=0):
helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable(
type=core.VarDesc.VarType.LOD_RANK_TABLE,
name=unique_name("lod_rank_table"))
name=unique_name.generate("lod_rank_table"))
helper.append_op(
type='lod_rank_table',
inputs={'X': x},
......@@ -807,7 +809,7 @@ def lod_tensor_to_array(x, table):
"""
helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable(
name=unique_name("lod_tensor_to_array"),
name=unique_name.generate("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
helper.append_op(
......@@ -1264,11 +1266,11 @@ class IfElse(object):
if id(x) not in self.input_table:
parent_block = self.parent_block()
out_true = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype)
out_false = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype)
parent_block.append_op(
type='split_lod_tensor',
......@@ -1310,7 +1312,8 @@ class IfElse(object):
raise TypeError("Each output should be a variable")
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name("_".join([self.helper.name, 'output'])),
name=unique_name.generate("_".join(
[self.helper.name, 'output'])),
dtype=each_out.dtype)
out_table.append(outside_out)
......@@ -1373,7 +1376,7 @@ class DynamicRNN(object):
parent_block = self._parent_block_()
if self.lod_rank_table is None:
self.lod_rank_table = parent_block.create_var(
name=unique_name('lod_rank_table'),
name=unique_name.generate('lod_rank_table'),
type=core.VarDesc.VarType.LOD_RANK_TABLE)
self.lod_rank_table.stop_gradient = True
parent_block.append_op(
......@@ -1381,7 +1384,8 @@ class DynamicRNN(object):
inputs={"X": x},
outputs={"Out": self.lod_rank_table})
self.max_seq_len = parent_block.create_var(
name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64')
name=unique_name.generate('dynamic_rnn_max_seq_len'),
dtype='int64')
self.max_seq_len.stop_gradient = False
parent_block.append_op(
type='max_sequence_len',
......@@ -1395,7 +1399,7 @@ class DynamicRNN(object):
outputs={'Out': self.cond})
input_array = parent_block.create_var(
name=unique_name('dynamic_rnn_input_array'),
name=unique_name.generate('dynamic_rnn_input_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
self.input_array.append((input_array, x.dtype))
......@@ -1416,7 +1420,7 @@ class DynamicRNN(object):
"static_input() must be called after step_input().")
parent_block = self._parent_block_()
x_reordered = parent_block.create_var(
name=unique_name("dynamic_rnn_static_input_reordered"),
name=unique_name.generate("dynamic_rnn_static_input_reordered"),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=x.dtype)
parent_block.append_op(
......@@ -1478,7 +1482,7 @@ class DynamicRNN(object):
'invoked before '
'memory(init=init, need_reordered=True, ...).')
init_reordered = parent_block.create_var(
name=unique_name('dynamic_rnn_mem_init_reordered'),
name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=init.dtype)
parent_block.append_op(
......@@ -1490,7 +1494,7 @@ class DynamicRNN(object):
outputs={'Out': [init_reordered]})
init_tensor = init_reordered
mem_array = parent_block.create_var(
name=unique_name('dynamic_rnn_mem_array'),
name=unique_name.generate('dynamic_rnn_mem_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=init.dtype)
parent_block.append_op(
......@@ -1510,9 +1514,10 @@ class DynamicRNN(object):
)
parent_block = self._parent_block_()
init = parent_block.create_var(
name=unique_name('mem_init'), dtype=dtype)
name=unique_name.generate('mem_init'), dtype=dtype)
arr, dtype = self.input_array[0]
in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype)
in0 = parent_block.create_var(
name=unique_name.generate('in0'), dtype=dtype)
parent_block.append_op(
type='read_from_array',
inputs={'X': [arr],
......@@ -1551,7 +1556,7 @@ class DynamicRNN(object):
parent_block = self._parent_block_()
for each in outputs:
outside_array = parent_block.create_var(
name=unique_name("_".join(
name=unique_name.generate("_".join(
[self.helper.name, "output_array", each.name])),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=each.dtype)
......
......@@ -25,7 +25,8 @@ __all__ = ['get_places']
@autodoc()
def get_places(device_count=None, device_type=None):
helper = LayerHelper('get_places', **locals())
out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
out_places = helper.create_variable(
name=unique_name.generate(helper.name + ".out"))
attrs = dict()
if device_count is not None:
attrs['device_count'] = int(device_count)
......
......@@ -21,7 +21,7 @@ __all__ = ['monkey_patch_variable']
def monkey_patch_variable():
def unique_tmp_name():
return unique_name("tmp")
return unique_name.generate("tmp")
def safe_get_dtype(var):
try:
......@@ -157,7 +157,9 @@ def monkey_patch_variable():
("__eq__", "equal", False),
("__ne__", "not_equal", False),
("__lt__", "less_than", False),
("__le__", "less_equal", False)):
("__le__", "less_equal", False),
("__gt__", "greater_than", False),
("__ge__", "greater_equal", False)):
setattr(Variable, method_name,
_elemwise_method_creator_(method_name, op_type, reverse))
......
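With the two new table entries above, Python comparison syntax on fluid `Variable`s now lowers to the new compare ops as well; a hedged sketch::

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[2], dtype='float32')
    y = fluid.layers.data(name='y', shape=[2], dtype='float32')

    # __gt__ / __ge__ dispatch to the greater_than / greater_equal operators.
    gt = x > y
    ge = x >= y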
......@@ -1519,21 +1519,21 @@ def batch_norm(input,
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_global_variable(
name=moving_mean_name,
dtype=input.dtype,
mean = helper.create_parameter(
attr=ParamAttr(
name=moving_mean_name, initializer=Constant(0.0), trainable=False),
shape=param_shape,
persistable=True,
stop_gradient=True)
helper.set_variable_initializer(var=mean, initializer=Constant(0.0))
dtype=input.dtype)
mean.stop_gradient = True
variance = helper.create_global_variable(
variance = helper.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
dtype=input.dtype,
initializer=Constant(1.0),
trainable=False),
shape=param_shape,
persistable=True,
stop_gradient=True)
helper.set_variable_initializer(var=variance, initializer=Constant(1.0))
dtype=input.dtype)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
......
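For context, a hedged sketch of calling `batch_norm` with named moving statistics, which this change now creates as non-trainable parameters (all names and sizes illustrative)::

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
    conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=3)
    bn = fluid.layers.batch_norm(
        input=conv,
        moving_mean_name='bn_moving_mean',
        moving_variance_name='bn_moving_variance')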
......@@ -17,8 +17,8 @@ import json
import logging
from collections import defaultdict
import paddle.v2.fluid.core as core
import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.fluid.core as core
import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2
def get_all_op_protos():
......
......@@ -17,7 +17,8 @@ from collections import defaultdict
import framework
import layers
from backward import append_backward
from framework import unique_name, program_guard
from framework import program_guard
import unique_name
from initializer import Constant
from layer_helper import LayerHelper
from regularizer import append_regularization_ops
......@@ -49,7 +50,7 @@ class Optimizer(object):
def _create_global_learning_rate(self):
if isinstance(self._global_learning_rate, float):
self._global_learning_rate = layers.create_global_var(
name=unique_name("learning_rate"),
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(self._global_learning_rate),
dtype='float32',
......@@ -118,7 +119,7 @@ class Optimizer(object):
assert isinstance(self.helper, LayerHelper)
var = self.helper.create_global_variable(
name=unique_name(name),
name=unique_name.generate(name),
persistable=True,
dtype=dtype or param.dtype,
type=param.type,
......@@ -379,7 +380,7 @@ class AdamOptimizer(Optimizer):
# Create beta1 and beta2 power tensors
beta_shape = [1]
self._beta1_pow_acc = self.helper.create_global_variable(
name=unique_name('beta1_pow_acc'),
name=unique_name.generate('beta1_pow_acc'),
dtype='float32',
shape=beta_shape,
lod_level=0,
......@@ -388,7 +389,7 @@ class AdamOptimizer(Optimizer):
self._beta1_pow_acc, initializer=Constant(self._beta1))
self._beta2_pow_acc = self.helper.create_global_variable(
name=unique_name('beta2_pow_acc'),
name=unique_name.generate('beta2_pow_acc'),
dtype='float32',
shape=beta_shape,
lod_level=0,
......@@ -481,7 +482,7 @@ class AdamaxOptimizer(Optimizer):
# Create beta1 power accumulator tensor
beta_shape = [1]
self._beta1_pow_acc = self.helper.create_global_variable(
name=unique_name('beta1_pow_acc'),
name=unique_name.generate('beta1_pow_acc'),
dtype='float32',
shape=beta_shape,
lod_level=0,
......
......@@ -14,15 +14,15 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import contextlib
import math
import sys
import unittest
from paddle.v2.fluid.executor import Executor
from paddle.fluid.executor import Executor
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import contextlib
import numpy
import unittest
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import contextlib
import math
import sys
......
......@@ -17,8 +17,8 @@ import math
import numpy as np
import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05
import paddle.v2.fluid as fluid
from paddle.v2.fluid.initializer import init_on_cpu
import paddle.fluid as fluid
from paddle.fluid.initializer import init_on_cpu
import contextlib
import time
import unittest
......
......@@ -15,10 +15,10 @@ import contextlib
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as pd
from paddle.v2.fluid.executor import Executor
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
import unittest
dict_size = 30000
......
......@@ -13,7 +13,7 @@
# limitations under the License.
from __future__ import print_function
import argparse
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import paddle.v2 as paddle
import sys
import numpy
......
......@@ -16,12 +16,12 @@ import math
import sys
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import SGDOptimizer
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
IS_SPARSE = True
USE_GPU = False
......
......@@ -14,7 +14,7 @@
from __future__ import print_function
import unittest
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import paddle.v2 as paddle
import contextlib
import math
......@@ -47,6 +47,46 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
return avg_cost, accuracy, prediction
def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,
lstm_size=128):
emb = fluid.layers.embedding(
input=data, size=[input_dim, emb_dim], is_sparse=True)
sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')
rnn = fluid.layers.DynamicRNN()
with rnn.block():
word = rnn.step_input(sentence)
prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
prev_cell = rnn.memory(value=0.0, shape=[lstm_size])
def gate_common(ipt, hidden, size):
gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
return gate0 + gate1
forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell = forget_gate * prev_cell + input_gate * cell_gate
hidden = output_gate * fluid.layers.tanh(x=cell)
rnn.update_memory(prev_cell, cell)
rnn.update_memory(prev_hidden, hidden)
rnn.output(hidden)
last = fluid.layers.sequence_last_step(rnn())
prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
def stacked_lstm_net(data,
label,
input_dim,
......@@ -270,6 +310,23 @@ class TestUnderstandSentiment(unittest.TestCase):
use_cuda=True,
parallel=True)
@unittest.skip(reason='make CI faster')
def test_dynrnn_lstm_gpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=False)
def test_dynrnn_lstm_gpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=True)
if __name__ == '__main__':
unittest.main()
......@@ -12,7 +12,7 @@
# limitations under the License.
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import unittest
import os
import numpy as np
......
......@@ -14,7 +14,7 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import os
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import os
import sys
......
......@@ -17,7 +17,7 @@ import math
import numpy as np
import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import time
import os
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import os
PASS_NUM = 100
......
......@@ -14,11 +14,11 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import os
dict_size = 30000
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import os
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import os
BATCH_SIZE = 128
......
......@@ -15,11 +15,11 @@
import numpy as np
import os
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.optimizer import SGDOptimizer
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.optimizer import SGDOptimizer
IS_SPARSE = True
BATCH_SIZE = 256
......
......@@ -16,7 +16,7 @@ from __future__ import print_function
import os
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
......
......@@ -15,7 +15,7 @@
import numpy as np
import os
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
def stacked_lstm_net(data,
......
......@@ -14,7 +14,7 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import math
import sys
......
......@@ -17,7 +17,7 @@ from __future__ import print_function
import sys
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import math
import sys
......
......@@ -14,11 +14,11 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import math
import sys
......
......@@ -20,7 +20,7 @@ import matplotlib
import numpy
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
matplotlib.use('Agg')
import matplotlib.pyplot as plt
......
......@@ -13,9 +13,9 @@
# limitations under the License.
import unittest
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
from paddle.v2.fluid.executor import Executor
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
class TestRoutineOp(unittest.TestCase):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
class TestCSPFramework(unittest.TestCase):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
import numpy as np
prog = fluid.framework.Program()
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.fluid as fluid
import paddle.fluid as fluid
def test_converter():
......
......@@ -13,9 +13,9 @@
# limitations under the License.
from __future__ import print_function
import paddle.v2.fluid as fluid
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
import unittest
......
(Diffs for three additional files are collapsed.)