Commit a50230ad authored by typhoonzero

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_raw_var_type

@@ -22,7 +22,8 @@ COPY ./paddle/scripts/docker/root/ /root/
RUN apt-get update && \
    apt-get install -y \
-     git python-pip python-dev openssh-server bison libnccl-dev \
+     git python-pip python-dev openssh-server bison \
+     libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
    wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
    curl sed grep graphviz libjpeg-dev zlib1g-dev \
    python-matplotlib gcc-4.8 g++-4.8 \
......
API
===

.. toctree::
  :maxdepth: 1

  Model Configuration <v2/model_configs.rst>
  Data Access <v2/data.rst>
  Training and Inference <v2/run_logic.rst>
  v2/fluid.rst
@@ -4,6 +4,7 @@ API
.. toctree::
  :maxdepth: 1

+   overview.rst
  v2/model_configs.rst
  v2/data.rst
  v2/run_logic.rst
......
V2 API Overview
===============

The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1 (the original layer-based platform of PaddlePaddle).
It introduces high-level concepts such as `Layers <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/layer.html>`_ , `Optimizer <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/optimizer.html>`_ , `Evaluator <http://www.paddlepaddle.org/docs/develop/api/en/v2/config/evaluators.html>`_ and `Data Reader <http://www.paddlepaddle.org/docs/develop/api/en/v2/data/data_reader.html>`_ to make model configuration more familiar to users.

A model is composed of the computation described by a group of `Layers`, with an `Evaluator` to define the error, an `Optimizer` to update the parameters and a `Data Reader` to feed in the data.

We also provide the `interface for Training and Inference <http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html>`_ to help control the training and inference phase. It offers several easy-to-use methods:

- `paddle.train`
- `paddle.test`
- `paddle.infer`

To better expose the internal running details, different `events <http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html#event>`_ are made available to users through callbacks.
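
As a rough end-to-end illustration of the concepts above, here is a schematic V2 training script in the spirit of the official fit-a-line example; the network, reader and optimizer choices are placeholders, and exact signatures may differ between releases:

    import paddle.v2 as paddle

    paddle.init(use_gpu=False, trainer_count=1)

    # A placeholder network: one fully connected layer doing linear regression.
    x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
    y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
    y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
    cost = paddle.layer.mse_cost(input=y_predict, label=y)

    parameters = paddle.parameters.create(cost)
    optimizer = paddle.optimizer.Momentum(momentum=0)
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=optimizer)

    def event_handler(event):
        # Events expose internal running details such as the per-batch cost.
        if isinstance(event, paddle.event.EndIteration):
            print("pass %d, batch %d, cost %f" %
                  (event.pass_id, event.batch_id, event.cost))

    trainer.train(
        reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=2),
        event_handler=event_handler,
        num_passes=10)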
@@ -8,7 +8,7 @@ data_feeder

DataFeeder
----------

- .. autoclass:: paddle.v2.fluid.data_feeder.DataFeeder
+ .. autoclass:: paddle.fluid.data_feeder.DataFeeder
    :members:
    :noindex:
@@ -8,14 +8,14 @@ evaluator

Accuracy
--------

- .. autoclass:: paddle.v2.fluid.evaluator.Accuracy
+ .. autoclass:: paddle.fluid.evaluator.Accuracy
    :members:
    :noindex:

ChunkEvaluator
--------------

- .. autoclass:: paddle.v2.fluid.evaluator.ChunkEvaluator
+ .. autoclass:: paddle.fluid.evaluator.ChunkEvaluator
    :members:
    :noindex:
@@ -8,25 +8,25 @@ executor

Executor
--------

- .. autoclass:: paddle.v2.fluid.executor.Executor
+ .. autoclass:: paddle.fluid.executor.Executor
    :members:
    :noindex:

global_scope
------------

- .. autofunction:: paddle.v2.fluid.executor.global_scope
+ .. autofunction:: paddle.fluid.executor.global_scope
    :noindex:

scope_guard
-----------

- .. autofunction:: paddle.v2.fluid.executor.scope_guard
+ .. autofunction:: paddle.fluid.executor.scope_guard
    :noindex:

switch_scope
------------

- .. autofunction:: paddle.v2.fluid.executor.switch_scope
+ .. autofunction:: paddle.fluid.executor.switch_scope
    :noindex:
@@ -17,7 +17,7 @@ import argparse
import sys
import types

- import paddle.v2.fluid as fluid
+ import paddle.fluid as fluid


def parse_arg():
@@ -70,7 +70,7 @@ class DocGenerator(object):
    def print_class(self, name):
        self._print_header_(name, dot='-', is_title=False)
-         self.stream.write('''..  autoclass:: paddle.v2.fluid.{0}.{1}
+         self.stream.write('''..  autoclass:: paddle.fluid.{0}.{1}
    :members:
    :noindex:
@@ -78,7 +78,7 @@ class DocGenerator(object):
    def print_method(self, name):
        self._print_header_(name, dot='-', is_title=False)
-         self.stream.write('''..  autofunction:: paddle.v2.fluid.{0}.{1}
+         self.stream.write('''..  autofunction:: paddle.fluid.{0}.{1}
    :noindex:
'''.format(self.module_name, name))
......
@@ -8,28 +8,28 @@ initializer

Constant
--------

- .. autoclass:: paddle.v2.fluid.initializer.Constant
+ .. autoclass:: paddle.fluid.initializer.Constant
    :members:
    :noindex:

Uniform
-------

- .. autoclass:: paddle.v2.fluid.initializer.Uniform
+ .. autoclass:: paddle.fluid.initializer.Uniform
    :members:
    :noindex:

Normal
------

- .. autoclass:: paddle.v2.fluid.initializer.Normal
+ .. autoclass:: paddle.fluid.initializer.Normal
    :members:
    :noindex:

Xavier
------

- .. autoclass:: paddle.v2.fluid.initializer.Xavier
+ .. autoclass:: paddle.fluid.initializer.Xavier
    :members:
    :noindex:
@@ -8,54 +8,54 @@ io

save_vars
---------

- .. autofunction:: paddle.v2.fluid.io.save_vars
+ .. autofunction:: paddle.fluid.io.save_vars
    :noindex:

save_params
-----------

- .. autofunction:: paddle.v2.fluid.io.save_params
+ .. autofunction:: paddle.fluid.io.save_params
    :noindex:

save_persistables
-----------------

- .. autofunction:: paddle.v2.fluid.io.save_persistables
+ .. autofunction:: paddle.fluid.io.save_persistables
    :noindex:

load_vars
---------

- .. autofunction:: paddle.v2.fluid.io.load_vars
+ .. autofunction:: paddle.fluid.io.load_vars
    :noindex:

load_params
-----------

- .. autofunction:: paddle.v2.fluid.io.load_params
+ .. autofunction:: paddle.fluid.io.load_params
    :noindex:

load_persistables
-----------------

- .. autofunction:: paddle.v2.fluid.io.load_persistables
+ .. autofunction:: paddle.fluid.io.load_persistables
    :noindex:

save_inference_model
--------------------

- .. autofunction:: paddle.v2.fluid.io.save_inference_model
+ .. autofunction:: paddle.fluid.io.save_inference_model
    :noindex:

load_inference_model
--------------------

- .. autofunction:: paddle.v2.fluid.io.load_inference_model
+ .. autofunction:: paddle.fluid.io.load_inference_model
    :noindex:

get_inference_program
---------------------

- .. autofunction:: paddle.v2.fluid.io.get_inference_program
+ .. autofunction:: paddle.fluid.io.get_inference_program
    :noindex:
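
For orientation, a sketch of how the save/load pair above is typically used; the directory name and network are illustrative, and the assumed signatures are save_inference_model(dirname, feeded_var_names, target_vars, executor) and load_inference_model(dirname, executor):

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_pred = fluid.layers.fc(input=x, size=1)
    exe.run(fluid.default_startup_program())

    # Save a pruned inference program together with its parameters.
    fluid.io.save_inference_model('./model_dir', ['x'], [y_pred], exe)

    # Later, restore the program, feed names and fetch targets for serving.
    program, feed_names, fetch_targets = fluid.io.load_inference_model(
        './model_dir', exe)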
@@ -11,167 +11,167 @@ control_flow

split_lod_tensor
----------------

- .. autofunction:: paddle.v2.fluid.layers.split_lod_tensor
+ .. autofunction:: paddle.fluid.layers.split_lod_tensor
    :noindex:

merge_lod_tensor
----------------

- .. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor
+ .. autofunction:: paddle.fluid.layers.merge_lod_tensor
    :noindex:

BlockGuard
----------

- .. autoclass:: paddle.v2.fluid.layers.BlockGuard
+ .. autoclass:: paddle.fluid.layers.BlockGuard
    :members:
    :noindex:

BlockGuardWithCompletion
------------------------

- .. autoclass:: paddle.v2.fluid.layers.BlockGuardWithCompletion
+ .. autoclass:: paddle.fluid.layers.BlockGuardWithCompletion
    :members:
    :noindex:

StaticRNNMemoryLink
-------------------

- .. autoclass:: paddle.v2.fluid.layers.StaticRNNMemoryLink
+ .. autoclass:: paddle.fluid.layers.StaticRNNMemoryLink
    :members:
    :noindex:

WhileGuard
----------

- .. autoclass:: paddle.v2.fluid.layers.WhileGuard
+ .. autoclass:: paddle.fluid.layers.WhileGuard
    :members:
    :noindex:

While
-----

- .. autoclass:: paddle.v2.fluid.layers.While
+ .. autoclass:: paddle.fluid.layers.While
    :members:
    :noindex:

lod_rank_table
--------------

- .. autofunction:: paddle.v2.fluid.layers.lod_rank_table
+ .. autofunction:: paddle.fluid.layers.lod_rank_table
    :noindex:

max_sequence_len
----------------

- .. autofunction:: paddle.v2.fluid.layers.max_sequence_len
+ .. autofunction:: paddle.fluid.layers.max_sequence_len
    :noindex:

topk
----

- .. autofunction:: paddle.v2.fluid.layers.topk
+ .. autofunction:: paddle.fluid.layers.topk
    :noindex:

lod_tensor_to_array
-------------------

- .. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array
+ .. autofunction:: paddle.fluid.layers.lod_tensor_to_array
    :noindex:

array_to_lod_tensor
-------------------

- .. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor
+ .. autofunction:: paddle.fluid.layers.array_to_lod_tensor
    :noindex:

increment
---------

- .. autofunction:: paddle.v2.fluid.layers.increment
+ .. autofunction:: paddle.fluid.layers.increment
    :noindex:

array_write
-----------

- .. autofunction:: paddle.v2.fluid.layers.array_write
+ .. autofunction:: paddle.fluid.layers.array_write
    :noindex:

create_array
------------

- .. autofunction:: paddle.v2.fluid.layers.create_array
+ .. autofunction:: paddle.fluid.layers.create_array
    :noindex:

less_than
---------

- .. autofunction:: paddle.v2.fluid.layers.less_than
+ .. autofunction:: paddle.fluid.layers.less_than
    :noindex:

array_read
----------

- .. autofunction:: paddle.v2.fluid.layers.array_read
+ .. autofunction:: paddle.fluid.layers.array_read
    :noindex:

shrink_memory
-------------

- .. autofunction:: paddle.v2.fluid.layers.shrink_memory
+ .. autofunction:: paddle.fluid.layers.shrink_memory
    :noindex:

array_length
------------

- .. autofunction:: paddle.v2.fluid.layers.array_length
+ .. autofunction:: paddle.fluid.layers.array_length
    :noindex:

IfElse
------

- .. autoclass:: paddle.v2.fluid.layers.IfElse
+ .. autoclass:: paddle.fluid.layers.IfElse
    :members:
    :noindex:

DynamicRNN
----------

- .. autoclass:: paddle.v2.fluid.layers.DynamicRNN
+ .. autoclass:: paddle.fluid.layers.DynamicRNN
    :members:
    :noindex:

ConditionalBlock
----------------

- .. autoclass:: paddle.v2.fluid.layers.ConditionalBlock
+ .. autoclass:: paddle.fluid.layers.ConditionalBlock
    :members:
    :noindex:

StaticRNN
---------

- .. autoclass:: paddle.v2.fluid.layers.StaticRNN
+ .. autoclass:: paddle.fluid.layers.StaticRNN
    :members:
    :noindex:

reorder_lod_tensor_by_rank
--------------------------

- .. autofunction:: paddle.v2.fluid.layers.reorder_lod_tensor_by_rank
+ .. autofunction:: paddle.fluid.layers.reorder_lod_tensor_by_rank
    :noindex:

ParallelDo
----------

- .. autoclass:: paddle.v2.fluid.layers.ParallelDo
+ .. autoclass:: paddle.fluid.layers.ParallelDo
    :members:
    :noindex:

Print
-----

- .. autofunction:: paddle.v2.fluid.layers.Print
+ .. autofunction:: paddle.fluid.layers.Print
    :noindex:
device
@@ -180,7 +180,7 @@ device

get_places
----------

- .. autofunction:: paddle.v2.fluid.layers.get_places
+ .. autofunction:: paddle.fluid.layers.get_places
    :noindex:
io
@@ -189,27 +189,27 @@ io

data
----

- .. autofunction:: paddle.v2.fluid.layers.data
+ .. autofunction:: paddle.fluid.layers.data
    :noindex:

BlockGuardServ
--------------

- .. autoclass:: paddle.v2.fluid.layers.BlockGuardServ
+ .. autoclass:: paddle.fluid.layers.BlockGuardServ
    :members:
    :noindex:

ListenAndServ
-------------

- .. autoclass:: paddle.v2.fluid.layers.ListenAndServ
+ .. autoclass:: paddle.fluid.layers.ListenAndServ
    :members:
    :noindex:

Send
----

- .. autofunction:: paddle.v2.fluid.layers.Send
+ .. autofunction:: paddle.fluid.layers.Send
    :noindex:
nn
@@ -218,259 +218,259 @@ nn

fc
--

- .. autofunction:: paddle.v2.fluid.layers.fc
+ .. autofunction:: paddle.fluid.layers.fc
    :noindex:

embedding
---------

- .. autofunction:: paddle.v2.fluid.layers.embedding
+ .. autofunction:: paddle.fluid.layers.embedding
    :noindex:

dynamic_lstm
------------

- .. autofunction:: paddle.v2.fluid.layers.dynamic_lstm
+ .. autofunction:: paddle.fluid.layers.dynamic_lstm
    :noindex:

dynamic_lstmp
-------------

- .. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp
+ .. autofunction:: paddle.fluid.layers.dynamic_lstmp
    :noindex:

dynamic_gru
-----------

- .. autofunction:: paddle.v2.fluid.layers.dynamic_gru
+ .. autofunction:: paddle.fluid.layers.dynamic_gru
    :noindex:

gru_unit
--------

- .. autofunction:: paddle.v2.fluid.layers.gru_unit
+ .. autofunction:: paddle.fluid.layers.gru_unit
    :noindex:

linear_chain_crf
----------------

- .. autofunction:: paddle.v2.fluid.layers.linear_chain_crf
+ .. autofunction:: paddle.fluid.layers.linear_chain_crf
    :noindex:

crf_decoding
------------

- .. autofunction:: paddle.v2.fluid.layers.crf_decoding
+ .. autofunction:: paddle.fluid.layers.crf_decoding
    :noindex:

cos_sim
-------

- .. autofunction:: paddle.v2.fluid.layers.cos_sim
+ .. autofunction:: paddle.fluid.layers.cos_sim
    :noindex:

cross_entropy
-------------

- .. autofunction:: paddle.v2.fluid.layers.cross_entropy
+ .. autofunction:: paddle.fluid.layers.cross_entropy
    :noindex:

square_error_cost
-----------------

- .. autofunction:: paddle.v2.fluid.layers.square_error_cost
+ .. autofunction:: paddle.fluid.layers.square_error_cost
    :noindex:

accuracy
--------

- .. autofunction:: paddle.v2.fluid.layers.accuracy
+ .. autofunction:: paddle.fluid.layers.accuracy
    :noindex:

chunk_eval
----------

- .. autofunction:: paddle.v2.fluid.layers.chunk_eval
+ .. autofunction:: paddle.fluid.layers.chunk_eval
    :noindex:

sequence_conv
-------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_conv
+ .. autofunction:: paddle.fluid.layers.sequence_conv
    :noindex:

conv2d
------

- .. autofunction:: paddle.v2.fluid.layers.conv2d
+ .. autofunction:: paddle.fluid.layers.conv2d
    :noindex:

sequence_pool
-------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_pool
+ .. autofunction:: paddle.fluid.layers.sequence_pool
    :noindex:

pool2d
------

- .. autofunction:: paddle.v2.fluid.layers.pool2d
+ .. autofunction:: paddle.fluid.layers.pool2d
    :noindex:

batch_norm
----------

- .. autofunction:: paddle.v2.fluid.layers.batch_norm
+ .. autofunction:: paddle.fluid.layers.batch_norm
    :noindex:

layer_norm
----------

- .. autofunction:: paddle.v2.fluid.layers.layer_norm
+ .. autofunction:: paddle.fluid.layers.layer_norm
    :noindex:

beam_search_decode
------------------

- .. autofunction:: paddle.v2.fluid.layers.beam_search_decode
+ .. autofunction:: paddle.fluid.layers.beam_search_decode
    :noindex:

conv2d_transpose
----------------

- .. autofunction:: paddle.v2.fluid.layers.conv2d_transpose
+ .. autofunction:: paddle.fluid.layers.conv2d_transpose
    :noindex:

sequence_expand
---------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_expand
+ .. autofunction:: paddle.fluid.layers.sequence_expand
    :noindex:

lstm_unit
---------

- .. autofunction:: paddle.v2.fluid.layers.lstm_unit
+ .. autofunction:: paddle.fluid.layers.lstm_unit
    :noindex:

reduce_sum
----------

- .. autofunction:: paddle.v2.fluid.layers.reduce_sum
+ .. autofunction:: paddle.fluid.layers.reduce_sum
    :noindex:

reduce_mean
-----------

- .. autofunction:: paddle.v2.fluid.layers.reduce_mean
+ .. autofunction:: paddle.fluid.layers.reduce_mean
    :noindex:

reduce_max
----------

- .. autofunction:: paddle.v2.fluid.layers.reduce_max
+ .. autofunction:: paddle.fluid.layers.reduce_max
    :noindex:

reduce_min
----------

- .. autofunction:: paddle.v2.fluid.layers.reduce_min
+ .. autofunction:: paddle.fluid.layers.reduce_min
    :noindex:

sequence_first_step
-------------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_first_step
+ .. autofunction:: paddle.fluid.layers.sequence_first_step
    :noindex:

sequence_last_step
------------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_last_step
+ .. autofunction:: paddle.fluid.layers.sequence_last_step
    :noindex:

dropout
-------

- .. autofunction:: paddle.v2.fluid.layers.dropout
+ .. autofunction:: paddle.fluid.layers.dropout
    :noindex:

split
-----

- .. autofunction:: paddle.v2.fluid.layers.split
+ .. autofunction:: paddle.fluid.layers.split
    :noindex:

ctc_greedy_decoder
------------------

- .. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder
+ .. autofunction:: paddle.fluid.layers.ctc_greedy_decoder
    :noindex:

edit_distance
-------------

- .. autofunction:: paddle.v2.fluid.layers.edit_distance
+ .. autofunction:: paddle.fluid.layers.edit_distance
    :noindex:

l2_normalize
------------

- .. autofunction:: paddle.v2.fluid.layers.l2_normalize
+ .. autofunction:: paddle.fluid.layers.l2_normalize
    :noindex:

matmul
------

- .. autofunction:: paddle.v2.fluid.layers.matmul
+ .. autofunction:: paddle.fluid.layers.matmul
    :noindex:

warpctc
-------

- .. autofunction:: paddle.v2.fluid.layers.warpctc
+ .. autofunction:: paddle.fluid.layers.warpctc
    :noindex:

sequence_reshape
----------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_reshape
+ .. autofunction:: paddle.fluid.layers.sequence_reshape
    :noindex:

transpose
---------

- .. autofunction:: paddle.v2.fluid.layers.transpose
+ .. autofunction:: paddle.fluid.layers.transpose
    :noindex:

im2sequence
-----------

- .. autofunction:: paddle.v2.fluid.layers.im2sequence
+ .. autofunction:: paddle.fluid.layers.im2sequence
    :noindex:

nce
---

- .. autofunction:: paddle.v2.fluid.layers.nce
+ .. autofunction:: paddle.fluid.layers.nce
    :noindex:

beam_search
-----------

- .. autofunction:: paddle.v2.fluid.layers.beam_search
+ .. autofunction:: paddle.fluid.layers.beam_search
    :noindex:

row_conv
--------

- .. autofunction:: paddle.v2.fluid.layers.row_conv
+ .. autofunction:: paddle.fluid.layers.row_conv
    :noindex:

multiplex
---------

- .. autofunction:: paddle.v2.fluid.layers.multiplex
+ .. autofunction:: paddle.fluid.layers.multiplex
    :noindex:
ops
@@ -479,259 +479,259 @@ ops

mean
----

- .. autofunction:: paddle.v2.fluid.layers.mean
+ .. autofunction:: paddle.fluid.layers.mean
    :noindex:

mul
---

- .. autofunction:: paddle.v2.fluid.layers.mul
+ .. autofunction:: paddle.fluid.layers.mul
    :noindex:

reshape
-------

- .. autofunction:: paddle.v2.fluid.layers.reshape
+ .. autofunction:: paddle.fluid.layers.reshape
    :noindex:

scale
-----

- .. autofunction:: paddle.v2.fluid.layers.scale
+ .. autofunction:: paddle.fluid.layers.scale
    :noindex:

sigmoid_cross_entropy_with_logits
---------------------------------

- .. autofunction:: paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits
+ .. autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
    :noindex:

elementwise_add
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_add
+ .. autofunction:: paddle.fluid.layers.elementwise_add
    :noindex:

elementwise_div
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_div
+ .. autofunction:: paddle.fluid.layers.elementwise_div
    :noindex:

elementwise_sub
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_sub
+ .. autofunction:: paddle.fluid.layers.elementwise_sub
    :noindex:

elementwise_mul
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_mul
+ .. autofunction:: paddle.fluid.layers.elementwise_mul
    :noindex:

elementwise_max
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_max
+ .. autofunction:: paddle.fluid.layers.elementwise_max
    :noindex:

elementwise_min
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_min
+ .. autofunction:: paddle.fluid.layers.elementwise_min
    :noindex:

elementwise_pow
---------------

- .. autofunction:: paddle.v2.fluid.layers.elementwise_pow
+ .. autofunction:: paddle.fluid.layers.elementwise_pow
    :noindex:

clip
----

- .. autofunction:: paddle.v2.fluid.layers.clip
+ .. autofunction:: paddle.fluid.layers.clip
    :noindex:

clip_by_norm
------------

- .. autofunction:: paddle.v2.fluid.layers.clip_by_norm
+ .. autofunction:: paddle.fluid.layers.clip_by_norm
    :noindex:

sequence_softmax
----------------

- .. autofunction:: paddle.v2.fluid.layers.sequence_softmax
+ .. autofunction:: paddle.fluid.layers.sequence_softmax
    :noindex:

sigmoid
-------

- .. autofunction:: paddle.v2.fluid.layers.sigmoid
+ .. autofunction:: paddle.fluid.layers.sigmoid
    :noindex:

logsigmoid
----------

- .. autofunction:: paddle.v2.fluid.layers.logsigmoid
+ .. autofunction:: paddle.fluid.layers.logsigmoid
    :noindex:

exp
---

- .. autofunction:: paddle.v2.fluid.layers.exp
+ .. autofunction:: paddle.fluid.layers.exp
    :noindex:

relu
----

- .. autofunction:: paddle.v2.fluid.layers.relu
+ .. autofunction:: paddle.fluid.layers.relu
    :noindex:

tanh
----

- .. autofunction:: paddle.v2.fluid.layers.tanh
+ .. autofunction:: paddle.fluid.layers.tanh
    :noindex:

tanh_shrink
-----------

- .. autofunction:: paddle.v2.fluid.layers.tanh_shrink
+ .. autofunction:: paddle.fluid.layers.tanh_shrink
    :noindex:

softshrink
----------

- .. autofunction:: paddle.v2.fluid.layers.softshrink
+ .. autofunction:: paddle.fluid.layers.softshrink
    :noindex:

sqrt
----

- .. autofunction:: paddle.v2.fluid.layers.sqrt
+ .. autofunction:: paddle.fluid.layers.sqrt
    :noindex:

abs
---

- .. autofunction:: paddle.v2.fluid.layers.abs
+ .. autofunction:: paddle.fluid.layers.abs
    :noindex:

ceil
----

- .. autofunction:: paddle.v2.fluid.layers.ceil
+ .. autofunction:: paddle.fluid.layers.ceil
    :noindex:

floor
-----

- .. autofunction:: paddle.v2.fluid.layers.floor
+ .. autofunction:: paddle.fluid.layers.floor
    :noindex:

round
-----

- .. autofunction:: paddle.v2.fluid.layers.round
+ .. autofunction:: paddle.fluid.layers.round
    :noindex:

reciprocal
----------

- .. autofunction:: paddle.v2.fluid.layers.reciprocal
+ .. autofunction:: paddle.fluid.layers.reciprocal
    :noindex:

log
---

- .. autofunction:: paddle.v2.fluid.layers.log
+ .. autofunction:: paddle.fluid.layers.log
    :noindex:

square
------

- .. autofunction:: paddle.v2.fluid.layers.square
+ .. autofunction:: paddle.fluid.layers.square
    :noindex:

softplus
--------

- .. autofunction:: paddle.v2.fluid.layers.softplus
+ .. autofunction:: paddle.fluid.layers.softplus
    :noindex:

softsign
--------

- .. autofunction:: paddle.v2.fluid.layers.softsign
+ .. autofunction:: paddle.fluid.layers.softsign
    :noindex:

brelu
-----

- .. autofunction:: paddle.v2.fluid.layers.brelu
+ .. autofunction:: paddle.fluid.layers.brelu
    :noindex:

leaky_relu
----------

- .. autofunction:: paddle.v2.fluid.layers.leaky_relu
+ .. autofunction:: paddle.fluid.layers.leaky_relu
    :noindex:

soft_relu
---------

- .. autofunction:: paddle.v2.fluid.layers.soft_relu
+ .. autofunction:: paddle.fluid.layers.soft_relu
    :noindex:

elu
---

- .. autofunction:: paddle.v2.fluid.layers.elu
+ .. autofunction:: paddle.fluid.layers.elu
    :noindex:

relu6
-----

- .. autofunction:: paddle.v2.fluid.layers.relu6
+ .. autofunction:: paddle.fluid.layers.relu6
    :noindex:

pow
---

- .. autofunction:: paddle.v2.fluid.layers.pow
+ .. autofunction:: paddle.fluid.layers.pow
    :noindex:

stanh
-----

- .. autofunction:: paddle.v2.fluid.layers.stanh
+ .. autofunction:: paddle.fluid.layers.stanh
    :noindex:

hard_shrink
-----------

- .. autofunction:: paddle.v2.fluid.layers.hard_shrink
+ .. autofunction:: paddle.fluid.layers.hard_shrink
    :noindex:

thresholded_relu
----------------

- .. autofunction:: paddle.v2.fluid.layers.thresholded_relu
+ .. autofunction:: paddle.fluid.layers.thresholded_relu
    :noindex:

hard_sigmoid
------------

- .. autofunction:: paddle.v2.fluid.layers.hard_sigmoid
+ .. autofunction:: paddle.fluid.layers.hard_sigmoid
    :noindex:

swish
-----

- .. autofunction:: paddle.v2.fluid.layers.swish
+ .. autofunction:: paddle.fluid.layers.swish
    :noindex:
tensor
@@ -740,66 +740,66 @@ tensor

create_tensor
-------------

- .. autofunction:: paddle.v2.fluid.layers.create_tensor
+ .. autofunction:: paddle.fluid.layers.create_tensor
    :noindex:

create_parameter
----------------

- .. autofunction:: paddle.v2.fluid.layers.create_parameter
+ .. autofunction:: paddle.fluid.layers.create_parameter
    :noindex:

create_global_var
-----------------

- .. autofunction:: paddle.v2.fluid.layers.create_global_var
+ .. autofunction:: paddle.fluid.layers.create_global_var
    :noindex:

cast
----

- .. autofunction:: paddle.v2.fluid.layers.cast
+ .. autofunction:: paddle.fluid.layers.cast
    :noindex:

concat
------

- .. autofunction:: paddle.v2.fluid.layers.concat
+ .. autofunction:: paddle.fluid.layers.concat
    :noindex:

sums
----

- .. autofunction:: paddle.v2.fluid.layers.sums
+ .. autofunction:: paddle.fluid.layers.sums
    :noindex:

assign
------

- .. autofunction:: paddle.v2.fluid.layers.assign
+ .. autofunction:: paddle.fluid.layers.assign
    :noindex:

fill_constant_batch_size_like
-----------------------------

- .. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like
+ .. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like
    :noindex:

fill_constant
-------------

- .. autofunction:: paddle.v2.fluid.layers.fill_constant
+ .. autofunction:: paddle.fluid.layers.fill_constant
    :noindex:

ones
----

- .. autofunction:: paddle.v2.fluid.layers.ones
+ .. autofunction:: paddle.fluid.layers.ones
    :noindex:

zeros
-----

- .. autofunction:: paddle.v2.fluid.layers.zeros
+ .. autofunction:: paddle.fluid.layers.zeros
    :noindex:
@@ -8,24 +8,24 @@ nets

simple_img_conv_pool
--------------------

- .. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool
+ .. autofunction:: paddle.fluid.nets.simple_img_conv_pool
    :noindex:

sequence_conv_pool
------------------

- .. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool
+ .. autofunction:: paddle.fluid.nets.sequence_conv_pool
    :noindex:

glu
---

- .. autofunction:: paddle.v2.fluid.nets.glu
+ .. autofunction:: paddle.fluid.nets.glu
    :noindex:

scaled_dot_product_attention
----------------------------

- .. autofunction:: paddle.v2.fluid.nets.scaled_dot_product_attention
+ .. autofunction:: paddle.fluid.nets.scaled_dot_product_attention
    :noindex:
@@ -8,42 +8,42 @@ optimizer

SGD
---

- .. autoclass:: paddle.v2.fluid.optimizer.SGD
+ .. autoclass:: paddle.fluid.optimizer.SGD
    :members:
    :noindex:

Momentum
--------

- .. autoclass:: paddle.v2.fluid.optimizer.Momentum
+ .. autoclass:: paddle.fluid.optimizer.Momentum
    :members:
    :noindex:

Adagrad
-------

- .. autoclass:: paddle.v2.fluid.optimizer.Adagrad
+ .. autoclass:: paddle.fluid.optimizer.Adagrad
    :members:
    :noindex:

Adam
----

- .. autoclass:: paddle.v2.fluid.optimizer.Adam
+ .. autoclass:: paddle.fluid.optimizer.Adam
    :members:
    :noindex:

Adamax
------

- .. autoclass:: paddle.v2.fluid.optimizer.Adamax
+ .. autoclass:: paddle.fluid.optimizer.Adamax
    :members:
    :noindex:

DecayedAdagrad
--------------

- .. autoclass:: paddle.v2.fluid.optimizer.DecayedAdagrad
+ .. autoclass:: paddle.fluid.optimizer.DecayedAdagrad
    :members:
    :noindex:
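
A minimal sketch of how these optimizer classes are used, assuming the usual pattern of building a cost variable and calling minimize on it (the network here is illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_pred = fluid.layers.fc(input=x, size=1)
    cost = fluid.layers.mean(
        x=fluid.layers.square_error_cost(input=y_pred, label=y))

    # minimize() appends the backward pass and parameter-update ops.
    sgd = fluid.optimizer.SGD(learning_rate=0.01)
    sgd.minimize(cost)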
@@ -8,14 +8,14 @@ param_attr

ParamAttr
---------

- .. autoclass:: paddle.v2.fluid.param_attr.ParamAttr
+ .. autoclass:: paddle.fluid.param_attr.ParamAttr
    :members:
    :noindex:

WeightNormParamAttr
-------------------

- .. autoclass:: paddle.v2.fluid.param_attr.WeightNormParamAttr
+ .. autoclass:: paddle.fluid.param_attr.WeightNormParamAttr
    :members:
    :noindex:
@@ -8,18 +8,18 @@ profiler

cuda_profiler
-------------

- .. autofunction:: paddle.v2.fluid.profiler.cuda_profiler
+ .. autofunction:: paddle.fluid.profiler.cuda_profiler
    :noindex:

reset_profiler
--------------

- .. autofunction:: paddle.v2.fluid.profiler.reset_profiler
+ .. autofunction:: paddle.fluid.profiler.reset_profiler
    :noindex:

profiler
--------

- .. autofunction:: paddle.v2.fluid.profiler.profiler
+ .. autofunction:: paddle.fluid.profiler.profiler
    :noindex:
@@ -8,20 +8,20 @@ regularizer

append_regularization_ops
-------------------------

- .. autofunction:: paddle.v2.fluid.regularizer.append_regularization_ops
+ .. autofunction:: paddle.fluid.regularizer.append_regularization_ops
    :noindex:

L1Decay
-------

- .. autoclass:: paddle.v2.fluid.regularizer.L1Decay
+ .. autoclass:: paddle.fluid.regularizer.L1Decay
    :members:
    :noindex:

L2Decay
-------

- .. autoclass:: paddle.v2.fluid.regularizer.L2Decay
+ .. autoclass:: paddle.fluid.regularizer.L2Decay
    :members:
    :noindex:
@@ -18,6 +18,7 @@ import shlex
from recommonmark import parser, transform
import paddle
import paddle.v2
+ import paddle.fluid

MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
......
@@ -18,6 +18,7 @@ import shlex
from recommonmark import parser, transform
import paddle
import paddle.v2
+ import paddle.fluid

MarkdownParser = parser.CommonMarkParser
......
@@ -68,9 +68,9 @@ py_proto_compile(framework_py_proto SRCS framework.proto)
add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(framework_py_proto framework_py_proto_init)
add_custom_command(TARGET framework_py_proto POST_BUILD
-     COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto
-     COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto/
-     COMMENT "Copy generated python proto into directory paddle/v2/fluid/proto."
+     COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto
+     COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/
+     COMMENT "Copy generated python proto into directory paddle/fluid/proto."
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})

cc_library(backward SRCS backward.cc DEPS net_op)
......
@@ -4,7 +4,7 @@ function(inference_test TARGET_NAME)
  set(multiValueArgs ARGS)
  cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

-   set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests)
+   set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/fluid/tests)
  set(arg_list "")
  if(inference_test_ARGS)
    foreach(arg ${inference_test_ARGS})
......
@@ -83,7 +83,7 @@ class CompareOp : public framework::OperatorWithKernel {
}  // namespace operators
}  // namespace paddle

- #define REGISTER_LOGICAL_OP(op_type, _equation) \
+ #define REGISTER_COMPARE_OP(op_type, _equation) \
  struct _##op_type##Comment { \
    static char type[]; \
    static char equation[]; \
@@ -96,11 +96,17 @@ class CompareOp : public framework::OperatorWithKernel {
                  ::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \
                  ::paddle::framework::EmptyGradOpMaker);

- REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
- REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
- REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
- REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
- REGISTER_LOGICAL_OP(equal, "Out = X == Y");
- REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
- REGISTER_LOGICAL_OP(not_equal, "Out = X != Y");
- REGISTER_LOGICAL_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor);
+ REGISTER_COMPARE_OP(less_than, "Out = X < Y");
+ REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
+ REGISTER_COMPARE_OP(less_equal, "Out = X <= Y");
+ REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
+ REGISTER_COMPARE_OP(greater_than, "Out = X > Y");
+ REGISTER_COMPARE_KERNEL(greater_than, CPU,
+                         paddle::operators::GreaterThanFunctor);
+ REGISTER_COMPARE_OP(greater_equal, "Out = X >= Y");
+ REGISTER_COMPARE_KERNEL(greater_equal, CPU,
+                         paddle::operators::GreaterEqualFunctor);
+ REGISTER_COMPARE_OP(equal, "Out = X == Y");
+ REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
+ REGISTER_COMPARE_OP(not_equal, "Out = X != Y");
+ REGISTER_COMPARE_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor);
@@ -14,7 +14,11 @@ limitations under the License. */

#include "paddle/fluid/operators/compare_op.h"

- REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
- REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
- REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
- REGISTER_LOGICAL_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor);
+ REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
+ REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
+ REGISTER_COMPARE_KERNEL(greater_than, CUDA,
+                         paddle::operators::GreaterThanFunctor);
+ REGISTER_COMPARE_KERNEL(greater_equal, CUDA,
+                         paddle::operators::GreaterEqualFunctor);
+ REGISTER_COMPARE_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);
+ REGISTER_COMPARE_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor);
@@ -34,6 +34,18 @@ struct LessEqualFunctor {
  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
};

+ template <typename T>
+ struct GreaterThanFunctor {
+   using ELEM_TYPE = T;
+   HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
+ };
+
+ template <typename T>
+ struct GreaterEqualFunctor {
+   using ELEM_TYPE = T;
+   HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
+ };
+
template <typename T>
struct EqualFunctor {
  using ELEM_TYPE = T;
@@ -76,7 +88,7 @@ class CompareOpKernel
}  // namespace operators
}  // namespace paddle

- #define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \
+ #define REGISTER_COMPARE_KERNEL(op_type, dev, functor) \
  REGISTER_OP_##dev##_KERNEL( \
      op_type, ::paddle::operators::CompareOpKernel< \
                   ::paddle::platform::dev##DeviceContext, functor<int>>, \
......
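Taken together, these three files rename the REGISTER_LOGICAL_* macros to REGISTER_COMPARE_* and add two elementwise comparison functors, greater_than and greater_equal, on both CPU and CUDA. For a sense of what such a comparison op computes at the Python level, here is a sketch using the already-documented less_than layer; it assumes the usual fluid Executor feed/fetch workflow, and whether the two new ops get Python wrappers in this commit is not shown here:

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[3], dtype='float32',
                          append_batch_size=False)
    y = fluid.layers.data(name='y', shape=[3], dtype='float32',
                          append_batch_size=False)
    cond = fluid.layers.less_than(x=x, y=y)  # elementwise Out = X < Y

    exe = fluid.Executor(fluid.CPUPlace())
    out, = exe.run(fluid.default_main_program(),
                   feed={'x': np.array([1., 2., 3.], dtype='float32'),
                         'y': np.array([3., 2., 1.], dtype='float32')},
                   fetch_list=[cond])
    print(out)  # [ True False False]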
@@ -129,9 +129,6 @@ TEST(NCCL, all_reduce) {
}  // namespace paddle

int main(int argc, char** argv) {
-   // FIXME(tonyyang-svail):
-   //    Due to the driver issue on our CI, disable for now
-   return 0;
  dev_count = paddle::platform::GetCUDADeviceCount();
  if (dev_count <= 1) {
    LOG(WARNING)
......
@@ -49,11 +49,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {

- static size_t UniqueIntegerGenerator(const std::string &prefix) {
-   static std::unordered_map<std::string, std::atomic<size_t>> generators;
-   return generators[prefix].fetch_add(1);
- }

bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
  return false;
@@ -410,7 +405,6 @@ All parameter, weight, gradient are variables in Paddle.
      (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
          Executor::Run);
-   m.def("unique_integer", UniqueIntegerGenerator);
  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices", &framework::InitDevices);
......
@@ -171,7 +171,7 @@ EOF
EOF

    if [[ ${WITH_GPU} == "ON" ]]; then
-         NCCL_DEPS="apt-get install -y libnccl-dev &&"
+         NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&"
    else
        NCCL_DEPS=""
    fi
......
@@ -3,12 +3,14 @@ file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py)
file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py)
file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py)
file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py)
+ file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/ *.py)

set(PY_FILES paddle/__init__.py
  ${TRAINER_PY_FILES}
  ${HELPERS_PY_FILES}
  ${UTILS_PY_FILES}
-   ${V2_PY_FILES})
+   ${V2_PY_FILES}
+   ${FLUID_PY_FILES})

add_custom_target(copy_paddle_master)
@@ -43,10 +45,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
    ${CMAKE_CURRENT_BINARY_DIR}/setup.py)

- add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so
-   COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so
+ add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so
+   COMMAND cmake -E copy $<TARGET_FILE:paddle_pybind> ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so
  DEPENDS paddle_pybind)
- add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so)
+ add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so)

add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp
@@ -72,7 +74,7 @@ if (WITH_TESTING)
    add_subdirectory(paddle/v2/tests)
    add_subdirectory(paddle/v2/reader/tests)
    add_subdirectory(paddle/v2/plot/tests)
-     add_subdirectory(paddle/v2/fluid/tests)
+     add_subdirectory(paddle/fluid/tests)
  endif()
endif()

install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR}
......
@@ -39,6 +39,7 @@ from concurrency import (Go, make_channel, channel_send, channel_recv,
import clip
from memory_optimization_transpiler import memory_optimize
import profiler
+ import unique_name

Tensor = LoDTensor
@@ -63,6 +64,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
    'DistributeTranspiler',
    'memory_optimize',
    'profiler',
+     'unique_name',
]
......
@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- from paddle.v2.fluid import framework as framework
+ from paddle.fluid import framework as framework
from . import core
import collections
import copy
+ import unique_name

__all__ = [
    'append_backward',
@@ -391,7 +392,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
        for name in op_desc.output_arg_names():
            if block.desc.find_var(name.encode("ascii")):
-                 new_name = "%s_%s" % (name, core.unique_integer(name))
+                 new_name = unique_name.generate(name)
                op_desc.rename_output(name, new_name)
                var_map[name] = new_name
......
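This hunk, together with the pybind change above that drops core.unique_integer, moves unique-name generation from C++ into a new Python unique_name module. A minimal sketch of the idea, assuming one process-wide counter per prefix and the prefix_N naming seen in the removed code (the real module may differ in detail):

    import collections
    import itertools

    # One monotonically increasing counter per prefix, shared process-wide.
    _counters = collections.defaultdict(itertools.count)

    def generate(prefix):
        """Return names like 'fc_0', 'fc_1', ..., unique within this process."""
        return "%s_%d" % (prefix, next(_counters[prefix]))

    print(generate("fc"))   # fc_0
    print(generate("fc"))   # fc_1
    print(generate("tmp"))  # tmp_0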
@@ -26,7 +26,7 @@ A `scoped_function` will take a `function` as input. That function will be
invoked in a new local scope.
"""

- import paddle.v2.fluid.core
+ import paddle.fluid.core
import threading

__tl_scope__ = threading.local()
@@ -44,13 +44,13 @@ __all__ = [
def get_cur_scope():
    """
    Get current scope.
-     :rtype: paddle.v2.fluid.core.Scope
+     :rtype: paddle.fluid.core.Scope
    """
    cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None)
    if cur_scope_stack is None:
        __tl_scope__.cur_scope = list()
    if len(__tl_scope__.cur_scope) == 0:
-         __tl_scope__.cur_scope.append(paddle.v2.fluid.core.Scope())
+         __tl_scope__.cur_scope.append(paddle.fluid.core.Scope())
    return __tl_scope__.cur_scope[-1]
......
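The docstring above describes a thread-local stack of scopes, where a scoped_function runs its argument inside a freshly pushed local scope. A self-contained sketch of that mechanism, using a stand-in class in place of paddle.fluid.core.Scope:

    import threading

    __tl_scope__ = threading.local()

    class _Scope(object):
        """Stand-in for paddle.fluid.core.Scope, for illustration only."""

    def get_cur_scope():
        if getattr(__tl_scope__, 'cur_scope', None) is None:
            __tl_scope__.cur_scope = [_Scope()]
        return __tl_scope__.cur_scope[-1]

    def scoped_function(fn):
        # Push a fresh local scope, invoke fn inside it, then pop it again.
        get_cur_scope()
        __tl_scope__.cur_scope.append(_Scope())
        try:
            fn()
        finally:
            __tl_scope__.cur_scope.pop()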
@@ -15,7 +15,8 @@

import numpy as np

import layers
- from framework import Program, unique_name, Variable, program_guard
+ from framework import Program, Variable, program_guard
+ import unique_name
from layer_helper import LayerHelper

__all__ = [
@@ -96,7 +97,7 @@ class Evaluator(object):
        """
        state = self.helper.create_variable(
-             name="_".join([unique_name(self.helper.name), suffix]),
+             name="_".join([unique_name.generate(self.helper.name), suffix]),
            persistable=True,
            dtype=dtype,
            shape=shape)
......
@@ -20,6 +20,7 @@ import numpy as np

import proto.framework_pb2 as framework_pb2
from . import core
+ import unique_name

__all__ = [
    'Block',
@@ -47,20 +48,6 @@ def grad_var_name(var_name):
    return var_name + GRAD_VAR_SUFFIX


- def unique_name(prefix):
-     """
-     Generate unique names with prefix
-
-     Args:
-         prefix(str): The prefix of return string
-
-     Returns(str): A unique string with the prefix
-     """
-     uid = core.unique_integer(prefix)  # unique during whole process.
-     return "_".join([prefix, str(uid)])


def convert_np_dtype_to_dtype_(np_dtype):
    """
    Convert the data type in numpy to the data type in Paddle
@@ -175,7 +162,7 @@ class Variable(object):
        self.error_clip = error_clip

        if name is None:
-             name = Variable._unique_var_name_()
+             name = unique_name.generate('_generated_var')
        is_new_var = False
        self.desc = self.block.desc.find_var(name)
@@ -307,12 +294,6 @@ class Variable(object):
    def type(self):
        return self.desc.type()

-     @staticmethod
-     def _unique_var_name_():
-         prefix = "_generated_var"
-         uid = core.unique_integer(prefix)  # unique during whole process.
-         return "_".join([prefix, str(uid)])

    def set_error_clip(self, error_clip):
        self.error_clip = error_clip
......
@@ -14,8 +14,8 @@

import os

- from paddle.v2.fluid.evaluator import Evaluator
- from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable
+ from paddle.fluid.evaluator import Evaluator
+ from paddle.fluid.framework import Program, Parameter, default_main_program, Variable
from . import core

__all__ = [
......
@@ -15,9 +15,9 @@
import copy
import itertools

- from framework import Variable, Parameter, default_main_program, default_startup_program, \
-     unique_name, dtype_is_floating
- from paddle.v2.fluid.initializer import Constant, Xavier
+ from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
+ import unique_name
+ from paddle.fluid.initializer import Constant, Xavier
from param_attr import ParamAttr, WeightNormParamAttr
@@ -27,7 +27,7 @@ class LayerHelper(object):
        self.layer_type = layer_type
        name = self.kwargs.get('name', None)
        if name is None:
-             self.kwargs['name'] = unique_name(self.layer_type)
+             self.kwargs['name'] = unique_name.generate(self.layer_type)

    @property
    def name(self):
@@ -117,17 +117,20 @@ class LayerHelper(object):
                      block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                     name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                     name=unique_name.generate(".".join(
+                         [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            abs_out = block.create_var(
-                 name=unique_name(".".join([self.name, 'weight_norm_abs'])),
+                 name=unique_name.generate(".".join(
+                     [self.name, 'weight_norm_abs'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
                type='abs', inputs={'X': x}, outputs={'Out': abs_out})
            pow_out = block.create_var(
-                 name=unique_name(".".join([self.name, 'weight_norm_pow'])),
+                 name=unique_name.generate(".".join(
+                     [self.name, 'weight_norm_pow'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
@@ -136,7 +139,8 @@ class LayerHelper(object):
                outputs={'Out': pow_out},
                attrs={'factor': float(p)})
            sum_out = block.create_var(
-                 name=unique_name(".".join([self.name, 'weight_norm_sum'])),
+                 name=unique_name.generate(".".join(
+                     [self.name, 'weight_norm_sum'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
@@ -161,7 +165,7 @@ class LayerHelper(object):
                      block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                     name=unique_name(".".join(
+                     name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_reshape'])),
                    dtype=dtype,
                    persistable=False)
@@ -178,7 +182,7 @@ class LayerHelper(object):
                      block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                     name=unique_name(".".join(
+                     name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_transpose'])),
                    dtype=dtype,
                    persistable=False)
@@ -196,7 +200,8 @@ class LayerHelper(object):
            """Computes the norm over all dimensions except dim"""
            if out is None:
                out = block.create_var(
-                     name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                     name=unique_name.generate(".".join(
+                         [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            if dim is None:
@@ -286,7 +291,7 @@ class LayerHelper(object):
        assert isinstance(attr, ParamAttr)
        suffix = 'b' if is_bias else 'w'
        if attr.name is None:
-             attr.name = unique_name(".".join([self.name, suffix]))
+             attr.name = unique_name.generate(".".join([self.name, suffix]))

        if default_initializer is None and attr.initializer is None:
            if is_bias:
@@ -316,7 +321,7 @@ class LayerHelper(object):
    def create_tmp_variable(self, dtype, stop_gradient=False):
        return self.main_program.current_block().create_var(
-             name=unique_name(".".join([self.name, 'tmp'])),
+             name=unique_name.generate(".".join([self.name, 'tmp'])),
            dtype=dtype,
            persistable=False,
            stop_gradient=stop_gradient)
......
...@@ -428,7 +428,8 @@ class StaticRNN(object): ...@@ -428,7 +428,8 @@ class StaticRNN(object):
raise ValueError( raise ValueError(
"if init is None, memory at least need shape and batch_ref") "if init is None, memory at least need shape and batch_ref")
parent_block = self.parent_block() parent_block = self.parent_block()
var_name = unique_name("@".join([self.helper.name, "memory_boot"])) var_name = unique_name.generate("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var( boot_var = parent_block.create_var(
name=var_name, name=var_name,
shape=shape, shape=shape,
...@@ -450,7 +451,7 @@ class StaticRNN(object): ...@@ -450,7 +451,7 @@ class StaticRNN(object):
return self.memory(init=boot_var) return self.memory(init=boot_var)
else: else:
pre_mem = self.helper.create_variable( pre_mem = self.helper.create_variable(
name=unique_name("@".join([self.helper.name, "mem"])), name=unique_name.generate("@".join([self.helper.name, "mem"])),
dtype=init.dtype, dtype=init.dtype,
shape=init.shape) shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink( self.memories[pre_mem.name] = StaticRNNMemoryLink(
...@@ -652,7 +653,8 @@ class While(object): ...@@ -652,7 +653,8 @@ class While(object):
parent_block.append_op( parent_block.append_op(
type='while', type='while',
inputs={ inputs={
'X': [parent_block.var(x_name) for x_name in x_name_list], 'X':
[parent_block.var_recursive(x_name) for x_name in x_name_list],
'Condition': [self.cond_var] 'Condition': [self.cond_var]
}, },
outputs={'Out': out_vars, outputs={'Out': out_vars,
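The While op switches from block.var to block.var_recursive when collecting its inputs, so names defined in an enclosing block resolve instead of raising. A hedged sketch of the recursive lookup; has_var and parent are assumed accessors, not necessarily the real Block API:

def var_recursive(block, name):
    # Walk up the block hierarchy until the name resolves.
    while block is not None:
        if block.has_var(name):    # assumed accessor
            return block.var(name)
        block = block.parent       # assumed parent link
    raise ValueError("variable %s not found in any enclosing block" % name)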
...@@ -709,7 +711,7 @@ def lod_rank_table(x, level=0): ...@@ -709,7 +711,7 @@ def lod_rank_table(x, level=0):
helper = LayerHelper("lod_rank_table", **locals()) helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable( table = helper.create_variable(
type=core.VarDesc.VarType.LOD_RANK_TABLE, type=core.VarDesc.VarType.LOD_RANK_TABLE,
name=unique_name("lod_rank_table")) name=unique_name.generate("lod_rank_table"))
helper.append_op( helper.append_op(
type='lod_rank_table', type='lod_rank_table',
inputs={'X': x}, inputs={'X': x},
...@@ -807,7 +809,7 @@ def lod_tensor_to_array(x, table): ...@@ -807,7 +809,7 @@ def lod_tensor_to_array(x, table):
""" """
helper = LayerHelper("lod_tensor_to_array", **locals()) helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable( array = helper.create_variable(
name=unique_name("lod_tensor_to_array"), name=unique_name.generate("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype) dtype=x.dtype)
helper.append_op( helper.append_op(
...@@ -1264,11 +1266,11 @@ class IfElse(object): ...@@ -1264,11 +1266,11 @@ class IfElse(object):
if id(x) not in self.input_table: if id(x) not in self.input_table:
parent_block = self.parent_block() parent_block = self.parent_block()
out_true = parent_block.create_var( out_true = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name), name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype) dtype=x.dtype)
out_false = parent_block.create_var( out_false = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name), name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype) dtype=x.dtype)
parent_block.append_op( parent_block.append_op(
type='split_lod_tensor', type='split_lod_tensor',
...@@ -1310,7 +1312,8 @@ class IfElse(object): ...@@ -1310,7 +1312,8 @@ class IfElse(object):
raise TypeError("Each output should be a variable") raise TypeError("Each output should be a variable")
# create outside tensor # create outside tensor
outside_out = parent_block.create_var( outside_out = parent_block.create_var(
name=unique_name("_".join([self.helper.name, 'output'])), name=unique_name.generate("_".join(
[self.helper.name, 'output'])),
dtype=each_out.dtype) dtype=each_out.dtype)
out_table.append(outside_out) out_table.append(outside_out)
...@@ -1373,7 +1376,7 @@ class DynamicRNN(object): ...@@ -1373,7 +1376,7 @@ class DynamicRNN(object):
parent_block = self._parent_block_() parent_block = self._parent_block_()
if self.lod_rank_table is None: if self.lod_rank_table is None:
self.lod_rank_table = parent_block.create_var( self.lod_rank_table = parent_block.create_var(
name=unique_name('lod_rank_table'), name=unique_name.generate('lod_rank_table'),
type=core.VarDesc.VarType.LOD_RANK_TABLE) type=core.VarDesc.VarType.LOD_RANK_TABLE)
self.lod_rank_table.stop_gradient = True self.lod_rank_table.stop_gradient = True
parent_block.append_op( parent_block.append_op(
...@@ -1381,7 +1384,8 @@ class DynamicRNN(object): ...@@ -1381,7 +1384,8 @@ class DynamicRNN(object):
inputs={"X": x}, inputs={"X": x},
outputs={"Out": self.lod_rank_table}) outputs={"Out": self.lod_rank_table})
self.max_seq_len = parent_block.create_var( self.max_seq_len = parent_block.create_var(
name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64') name=unique_name.generate('dynamic_rnn_max_seq_len'),
dtype='int64')
self.max_seq_len.stop_gradient = False self.max_seq_len.stop_gradient = False
parent_block.append_op( parent_block.append_op(
type='max_sequence_len', type='max_sequence_len',
...@@ -1395,7 +1399,7 @@ class DynamicRNN(object): ...@@ -1395,7 +1399,7 @@ class DynamicRNN(object):
outputs={'Out': self.cond}) outputs={'Out': self.cond})
input_array = parent_block.create_var( input_array = parent_block.create_var(
name=unique_name('dynamic_rnn_input_array'), name=unique_name.generate('dynamic_rnn_input_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype) dtype=x.dtype)
self.input_array.append((input_array, x.dtype)) self.input_array.append((input_array, x.dtype))
...@@ -1416,7 +1420,7 @@ class DynamicRNN(object): ...@@ -1416,7 +1420,7 @@ class DynamicRNN(object):
"static_input() must be called after step_input().") "static_input() must be called after step_input().")
parent_block = self._parent_block_() parent_block = self._parent_block_()
x_reordered = parent_block.create_var( x_reordered = parent_block.create_var(
name=unique_name("dynamic_rnn_static_input_reordered"), name=unique_name.generate("dynamic_rnn_static_input_reordered"),
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
dtype=x.dtype) dtype=x.dtype)
parent_block.append_op( parent_block.append_op(
...@@ -1478,7 +1482,7 @@ class DynamicRNN(object): ...@@ -1478,7 +1482,7 @@ class DynamicRNN(object):
'invoked before ' 'invoked before '
'memory(init=init, need_reordered=True, ...).') 'memory(init=init, need_reordered=True, ...).')
init_reordered = parent_block.create_var( init_reordered = parent_block.create_var(
name=unique_name('dynamic_rnn_mem_init_reordered'), name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
dtype=init.dtype) dtype=init.dtype)
parent_block.append_op( parent_block.append_op(
...@@ -1490,7 +1494,7 @@ class DynamicRNN(object): ...@@ -1490,7 +1494,7 @@ class DynamicRNN(object):
outputs={'Out': [init_reordered]}) outputs={'Out': [init_reordered]})
init_tensor = init_reordered init_tensor = init_reordered
mem_array = parent_block.create_var( mem_array = parent_block.create_var(
name=unique_name('dynamic_rnn_mem_array'), name=unique_name.generate('dynamic_rnn_mem_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=init.dtype) dtype=init.dtype)
parent_block.append_op( parent_block.append_op(
...@@ -1510,9 +1514,10 @@ class DynamicRNN(object): ...@@ -1510,9 +1514,10 @@ class DynamicRNN(object):
) )
parent_block = self._parent_block_() parent_block = self._parent_block_()
init = parent_block.create_var( init = parent_block.create_var(
name=unique_name('mem_init'), dtype=dtype) name=unique_name.generate('mem_init'), dtype=dtype)
arr, dtype = self.input_array[0] arr, dtype = self.input_array[0]
in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype) in0 = parent_block.create_var(
name=unique_name.generate('in0'), dtype=dtype)
parent_block.append_op( parent_block.append_op(
type='read_from_array', type='read_from_array',
inputs={'X': [arr], inputs={'X': [arr],
...@@ -1551,7 +1556,7 @@ class DynamicRNN(object): ...@@ -1551,7 +1556,7 @@ class DynamicRNN(object):
parent_block = self._parent_block_() parent_block = self._parent_block_()
for each in outputs: for each in outputs:
outside_array = parent_block.create_var( outside_array = parent_block.create_var(
name=unique_name("_".join( name=unique_name.generate("_".join(
[self.helper.name, "output_array", each.name])), [self.helper.name, "output_array", each.name])),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=each.dtype) dtype=each.dtype)
......
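The DynamicRNN helpers touched above (step_input, memory, update_memory, output) compose as in this minimal usage sketch, which mirrors the dyn_rnn_lstm test added later in this commit (layer sizes and names are illustrative):

import paddle.fluid as fluid

sentence = fluid.layers.data(
    name='word', shape=[32], dtype='float32', lod_level=1)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
    word = drnn.step_input(sentence)           # one time step per iteration
    prev = drnn.memory(value=0.0, shape=[32])  # zero-initialized state
    hidden = fluid.layers.fc(input=[word, prev], size=32, act='tanh')
    drnn.update_memory(prev, hidden)
    drnn.output(hidden)
last = fluid.layers.sequence_last_step(drnn())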
...@@ -25,7 +25,8 @@ __all__ = ['get_places'] ...@@ -25,7 +25,8 @@ __all__ = ['get_places']
@autodoc() @autodoc()
def get_places(device_count=None, device_type=None): def get_places(device_count=None, device_type=None):
helper = LayerHelper('get_places', **locals()) helper = LayerHelper('get_places', **locals())
out_places = helper.create_variable(name=unique_name(helper.name + ".out")) out_places = helper.create_variable(
name=unique_name.generate(helper.name + ".out"))
attrs = dict() attrs = dict()
if device_count is not None: if device_count is not None:
attrs['device_count'] = int(device_count) attrs['device_count'] = int(device_count)
......
...@@ -21,7 +21,7 @@ __all__ = ['monkey_patch_variable'] ...@@ -21,7 +21,7 @@ __all__ = ['monkey_patch_variable']
def monkey_patch_variable(): def monkey_patch_variable():
def unique_tmp_name(): def unique_tmp_name():
return unique_name("tmp") return unique_name.generate("tmp")
def safe_get_dtype(var): def safe_get_dtype(var):
try: try:
...@@ -157,7 +157,9 @@ def monkey_patch_variable(): ...@@ -157,7 +157,9 @@ def monkey_patch_variable():
("__eq__", "equal", False), ("__eq__", "equal", False),
("__ne__", "not_equal", False), ("__ne__", "not_equal", False),
("__lt__", "less_than", False), ("__lt__", "less_than", False),
("__le__", "less_equal", False)): ("__le__", "less_equal", False),
("__gt__", "greater_than", False),
("__ge__", "greater_equal", False)):
setattr(Variable, method_name, setattr(Variable, method_name,
_elemwise_method_creator_(method_name, op_type, reverse)) _elemwise_method_creator_(method_name, op_type, reverse))
......
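With __gt__ and __ge__ added to the table, comparing two Variables now builds greater_than / greater_equal ops instead of falling back to default object comparison. A hedged sketch of the registration pattern, using stub types so it runs standalone:

class Variable(object):
    def __init__(self, name):
        self.name = name

def _elemwise_method_creator_(method_name, op_type, reverse):
    def __impl__(self, other):
        # The real creator appends op_type to the current program; this
        # stub only reports which op would be emitted.
        return "%s(%s, %s)" % (op_type, self.name, other.name)
    __impl__.__name__ = method_name
    return __impl__

for method_name, op_type, reverse in (("__gt__", "greater_than", False),
                                      ("__ge__", "greater_equal", False)):
    setattr(Variable, method_name,
            _elemwise_method_creator_(method_name, op_type, reverse))

print(Variable("a") > Variable("b"))  # greater_than(a, b)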
...@@ -1519,21 +1519,21 @@ def batch_norm(input, ...@@ -1519,21 +1519,21 @@ def batch_norm(input,
bias = helper.create_parameter( bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_global_variable( mean = helper.create_parameter(
name=moving_mean_name, attr=ParamAttr(
dtype=input.dtype, name=moving_mean_name, initializer=Constant(0.0), trainable=False),
shape=param_shape, shape=param_shape,
persistable=True, dtype=input.dtype)
stop_gradient=True) mean.stop_gradient = True
helper.set_variable_initializer(var=mean, initializer=Constant(0.0))
variance = helper.create_global_variable( variance = helper.create_parameter(
name=moving_variance_name, attr=ParamAttr(
dtype=input.dtype, name=moving_variance_name,
initializer=Constant(1.0),
trainable=False),
shape=param_shape, shape=param_shape,
persistable=True, dtype=input.dtype)
stop_gradient=True) variance.stop_gradient = True
helper.set_variable_initializer(var=variance, initializer=Constant(1.0))
# create output # create output
# mean and mean_out share the same memory # mean and mean_out share the same memory
......
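The moving mean and variance of batch_norm change from plain global variables to non-trainable parameters: still excluded from gradient updates (trainable=False plus stop_gradient), but now saved and restored with the model like any other parameter. A hedged usage sketch:

import paddle.fluid as fluid

img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=3)
# moving_mean_name / moving_variance_name now name non-trainable
# parameters, so checkpoints carry the running statistics.
out = fluid.layers.batch_norm(
    input=conv,
    moving_mean_name='bn_moving_mean',
    moving_variance_name='bn_moving_variance')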
...@@ -17,8 +17,8 @@ import json ...@@ -17,8 +17,8 @@ import json
import logging import logging
from collections import defaultdict from collections import defaultdict
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 import paddle.fluid.proto.framework_pb2 as framework_pb2
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) logger.setLevel(logging.INFO)
......
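The remaining hunks apply the package rename mechanically; paddle.v2.fluid becomes paddle.fluid everywhere, following this pattern:

import paddle.fluid as fluid                # was: import paddle.v2.fluid as fluid
import paddle.fluid.core as core            # was: import paddle.v2.fluid.core as core
from paddle.fluid.executor import Executor  # was: from paddle.v2.fluid.executor import Executor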
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 import paddle.fluid.proto.framework_pb2 as framework_pb2
def get_all_op_protos(): def get_all_op_protos():
......
...@@ -17,7 +17,8 @@ from collections import defaultdict ...@@ -17,7 +17,8 @@ from collections import defaultdict
import framework import framework
import layers import layers
from backward import append_backward from backward import append_backward
from framework import unique_name, program_guard from framework import program_guard
import unique_name
from initializer import Constant from initializer import Constant
from layer_helper import LayerHelper from layer_helper import LayerHelper
from regularizer import append_regularization_ops from regularizer import append_regularization_ops
...@@ -49,7 +50,7 @@ class Optimizer(object): ...@@ -49,7 +50,7 @@ class Optimizer(object):
def _create_global_learning_rate(self): def _create_global_learning_rate(self):
if isinstance(self._global_learning_rate, float): if isinstance(self._global_learning_rate, float):
self._global_learning_rate = layers.create_global_var( self._global_learning_rate = layers.create_global_var(
name=unique_name("learning_rate"), name=unique_name.generate("learning_rate"),
shape=[1], shape=[1],
value=float(self._global_learning_rate), value=float(self._global_learning_rate),
dtype='float32', dtype='float32',
...@@ -118,7 +119,7 @@ class Optimizer(object): ...@@ -118,7 +119,7 @@ class Optimizer(object):
assert isinstance(self.helper, LayerHelper) assert isinstance(self.helper, LayerHelper)
var = self.helper.create_global_variable( var = self.helper.create_global_variable(
name=unique_name(name), name=unique_name.generate(name),
persistable=True, persistable=True,
dtype=dtype or param.dtype, dtype=dtype or param.dtype,
type=param.type, type=param.type,
...@@ -379,7 +380,7 @@ class AdamOptimizer(Optimizer): ...@@ -379,7 +380,7 @@ class AdamOptimizer(Optimizer):
# Create beta1 and beta2 power tensors # Create beta1 and beta2 power tensors
beta_shape = [1] beta_shape = [1]
self._beta1_pow_acc = self.helper.create_global_variable( self._beta1_pow_acc = self.helper.create_global_variable(
name=unique_name('beta1_pow_acc'), name=unique_name.generate('beta1_pow_acc'),
dtype='float32', dtype='float32',
shape=beta_shape, shape=beta_shape,
lod_level=0, lod_level=0,
...@@ -388,7 +389,7 @@ class AdamOptimizer(Optimizer): ...@@ -388,7 +389,7 @@ class AdamOptimizer(Optimizer):
self._beta1_pow_acc, initializer=Constant(self._beta1)) self._beta1_pow_acc, initializer=Constant(self._beta1))
self._beta2_pow_acc = self.helper.create_global_variable( self._beta2_pow_acc = self.helper.create_global_variable(
name=unique_name('beta2_pow_acc'), name=unique_name.generate('beta2_pow_acc'),
dtype='float32', dtype='float32',
shape=beta_shape, shape=beta_shape,
lod_level=0, lod_level=0,
...@@ -481,7 +482,7 @@ class AdamaxOptimizer(Optimizer): ...@@ -481,7 +482,7 @@ class AdamaxOptimizer(Optimizer):
# Create beta1 power accumulator tensor # Create beta1 power accumulator tensor
beta_shape = [1] beta_shape = [1]
self._beta1_pow_acc = self.helper.create_global_variable( self._beta1_pow_acc = self.helper.create_global_variable(
name=unique_name('beta1_pow_acc'), name=unique_name.generate('beta1_pow_acc'),
dtype='float32', dtype='float32',
shape=beta_shape, shape=beta_shape,
lod_level=0, lod_level=0,
......
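With unique_name now imported as a module, the optimizer names its learning-rate tensor and accumulators via unique_name.generate. A hedged sketch of the learning-rate case (the persistable argument is assumed; the hunk above is truncated before it):

import paddle.fluid.layers as layers
from paddle.fluid import unique_name

lr = layers.create_global_var(
    name=unique_name.generate("learning_rate"),
    shape=[1],
    value=0.01,           # float(self._global_learning_rate) in the source
    dtype='float32',
    persistable=True)     # assumed; not visible in the truncated hunk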
...@@ -14,15 +14,15 @@ ...@@ -14,15 +14,15 @@
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.framework as framework import paddle.fluid.framework as framework
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
import contextlib import contextlib
import math import math
import sys import sys
import unittest import unittest
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
dict_size = 30000 dict_size = 30000
source_dict_dim = target_dict_dim = dict_size source_dict_dim = target_dict_dim = dict_size
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import contextlib import contextlib
import numpy import numpy
import unittest import unittest
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from __future__ import print_function from __future__ import print_function
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import contextlib import contextlib
import math import math
import sys import sys
......
...@@ -17,8 +17,8 @@ import math ...@@ -17,8 +17,8 @@ import math
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05 import paddle.v2.dataset.conll05 as conll05
import paddle.v2.fluid as fluid import paddle.fluid as fluid
from paddle.v2.fluid.initializer import init_on_cpu from paddle.fluid.initializer import init_on_cpu
import contextlib import contextlib
import time import time
import unittest import unittest
......
...@@ -15,10 +15,10 @@ import contextlib ...@@ -15,10 +15,10 @@ import contextlib
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.framework as framework import paddle.fluid.framework as framework
import paddle.v2.fluid.layers as pd import paddle.fluid.layers as pd
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
import unittest import unittest
dict_size = 30000 dict_size = 30000
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
import argparse import argparse
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2 as paddle import paddle.v2 as paddle
import sys import sys
import numpy import numpy
......
...@@ -16,12 +16,12 @@ import math ...@@ -16,12 +16,12 @@ import math
import sys import sys
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.framework as framework import paddle.fluid.framework as framework
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
import paddle.v2.fluid.nets as nets import paddle.fluid.nets as nets
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
from paddle.v2.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
IS_SPARSE = True IS_SPARSE = True
USE_GPU = False USE_GPU = False
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
from __future__ import print_function from __future__ import print_function
import unittest import unittest
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2 as paddle import paddle.v2 as paddle
import contextlib import contextlib
import math import math
...@@ -47,6 +47,46 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, ...@@ -47,6 +47,46 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
return avg_cost, accuracy, prediction return avg_cost, accuracy, prediction
def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,
lstm_size=128):
emb = fluid.layers.embedding(
input=data, size=[input_dim, emb_dim], is_sparse=True)
sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')
rnn = fluid.layers.DynamicRNN()
with rnn.block():
word = rnn.step_input(sentence)
prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
prev_cell = rnn.memory(value=0.0, shape=[lstm_size])
def gate_common(ipt, hidden, size):
gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
return gate0 + gate1
forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
lstm_size))
cell = forget_gate * prev_cell + input_gate * cell_gate
hidden = output_gate * fluid.layers.tanh(x=cell)
rnn.update_memory(prev_cell, cell)
rnn.update_memory(prev_hidden, hidden)
rnn.output(hidden)
last = fluid.layers.sequence_last_step(rnn())
prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
accuracy = fluid.layers.accuracy(input=prediction, label=label)
return avg_cost, accuracy, prediction
def stacked_lstm_net(data, def stacked_lstm_net(data,
label, label,
input_dim, input_dim,
...@@ -270,6 +310,23 @@ class TestUnderstandSentiment(unittest.TestCase): ...@@ -270,6 +310,23 @@ class TestUnderstandSentiment(unittest.TestCase):
use_cuda=True, use_cuda=True,
parallel=True) parallel=True)
@unittest.skip(reason='make CI faster')
def test_dynrnn_lstm_gpu(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=False)
def test_dynrnn_lstm_gpu_parallel(self):
with self.new_program_scope():
main(
self.word_dict,
net_method=dyn_rnn_lstm,
use_cuda=True,
parallel=True)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
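One detail worth flagging in the new dyn_rnn_lstm above: all four gates, including the cell candidate (cell_gate), go through sigmoid, whereas the textbook LSTM applies tanh to the candidate. A plain-NumPy reference step for comparison (shapes and names are illustrative, not part of the commit):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, h, c, W):
    # W maps the concatenated [x, h] onto the four gate pre-activations.
    z = np.concatenate([x, h]).dot(W)
    f, i, o, g = np.split(z, 4)
    f, i, o = sigmoid(f), sigmoid(i), sigmoid(o)
    g = np.tanh(g)                   # candidate uses tanh, not sigmoid
    c_new = f * c + i * g
    h_new = o * np.tanh(c_new)
    return h_new, c_new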
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# limitations under the License. # limitations under the License.
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import unittest import unittest
import os import os
import numpy as np import numpy as np
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import os import os
x = fluid.layers.data(name='x', shape=[13], dtype='float32') x = fluid.layers.data(name='x', shape=[13], dtype='float32')
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from __future__ import print_function from __future__ import print_function
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import os import os
import sys import sys
......
...@@ -17,7 +17,7 @@ import math ...@@ -17,7 +17,7 @@ import math
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05 import paddle.v2.dataset.conll05 as conll05
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import time import time
import os import os
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import os import os
PASS_NUM = 100 PASS_NUM = 100
......
...@@ -14,11 +14,11 @@ ...@@ -14,11 +14,11 @@
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.framework as framework import paddle.fluid.framework as framework
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
import os import os
dict_size = 30000 dict_size = 30000
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import os import os
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32') images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import os import os
BATCH_SIZE = 128 BATCH_SIZE = 128
......
...@@ -15,11 +15,11 @@ ...@@ -15,11 +15,11 @@
import numpy as np import numpy as np
import os import os
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
import paddle.v2.fluid.nets as nets import paddle.fluid.nets as nets
from paddle.v2.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
IS_SPARSE = True IS_SPARSE = True
BATCH_SIZE = 256 BATCH_SIZE = 256
......
...@@ -16,7 +16,7 @@ from __future__ import print_function ...@@ -16,7 +16,7 @@ from __future__ import print_function
import os import os
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import numpy as np import numpy as np
import os import os
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
def stacked_lstm_net(data, def stacked_lstm_net(data,
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import math import math
import sys import sys
......
...@@ -17,7 +17,7 @@ from __future__ import print_function ...@@ -17,7 +17,7 @@ from __future__ import print_function
import sys import sys
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import math import math
import sys import sys
......
...@@ -14,11 +14,11 @@ ...@@ -14,11 +14,11 @@
import numpy as np import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.core as core import paddle.fluid.core as core
import paddle.v2.fluid.framework as framework import paddle.fluid.framework as framework
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
import math import math
import sys import sys
......
...@@ -20,7 +20,7 @@ import matplotlib ...@@ -20,7 +20,7 @@ import matplotlib
import numpy import numpy
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
matplotlib.use('Agg') matplotlib.use('Agg')
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.core as core import paddle.fluid.core as core
from paddle.v2.fluid.executor import Executor from paddle.fluid.executor import Executor
class TestRoutineOp(unittest.TestCase): class TestRoutineOp(unittest.TestCase):
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle.v2.fluid as fluid import paddle.fluid as fluid
class TestCSPFramework(unittest.TestCase): class TestCSPFramework(unittest.TestCase):
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
import paddle.v2 as paddle import paddle.v2 as paddle
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import numpy as np import numpy as np
prog = fluid.framework.Program() prog = fluid.framework.Program()
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import paddle.v2.fluid as fluid import paddle.fluid as fluid
def test_converter(): def test_converter():
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
import paddle.v2.fluid as fluid import paddle.fluid as fluid
import paddle.v2.fluid.layers as layers import paddle.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard from paddle.fluid.framework import Program, program_guard
import unittest import unittest
......
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.