diff --git a/.travis.yml b/.travis.yml index 44b755ee32d204c883f0d74e7ad0f78380918954..f9b4a7e08315a42a61a58d6c61c45771df962c4d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -50,6 +50,7 @@ before_install: # protobuf version. - pip install numpy wheel 'protobuf==3.1' sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker - pip install rarfile + - eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - | function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; } script: diff --git a/CMakeLists.txt b/CMakeLists.txt index 79210d043648de5d493f0b998eeb885c993a6106..2b6a80ca43cf131c6886455cb5a86a61246ac17c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -126,7 +126,9 @@ endif(WITH_GPU) add_subdirectory(proto) add_subdirectory(paddle) +add_subdirectory(go/master/c) add_subdirectory(python) +add_subdirectory(go/pserver/cclient) if(WITH_DOC) add_subdirectory(doc) diff --git a/doc/design/cluster_train/pserver_client.md b/doc/design/cluster_train/pserver_client.md index b3e4079010490b69db1de28157f0cab80cad2381..474b8c572cd92fc87e9f7f3f2b19d12cccd158de 100644 --- a/doc/design/cluster_train/pserver_client.md +++ b/doc/design/cluster_train/pserver_client.md @@ -74,14 +74,25 @@ typedef enum { typedef struct { char* name; paddle_element_type element_type; - void* content; + unsigned char* content; int content_len; } paddle_parameter, paddle_gradient; -typedef struct paddle_pserver_client paddle_pserver_client; +typedef int paddle_pserver_client; -paddle_pserver_client* paddle_new_pserver_client(); -void paddle_pserver_client_release(paddle_pserver_client* client); +/** + * @brief creates a pserver client that talks to etcd for coordination. + */ +paddle_pserver_client paddle_new_etcd_pserver_client(char* etcd_addr); + +/** + * @brief creates a pserver client given pserver addresses. + * + * @param pserver_addrs comma-separated pserver addresses. + * @param selected if current pserver client is selected to initialize all parameter servers. + */ +paddle_pserver_client paddle_new_pserver_client(char* pserver_addrs, int selected); +void paddle_pserver_client_release(paddle_pserver_client c); /** * @brief paddle_begin_init_params begins to initialize parameters on @@ -95,7 +106,7 @@ void paddle_pserver_client_release(paddle_pserver_client* client); * @return 1 if the trainer is selected to initialize parameter * servers, otherwise 0. */ -int paddle_begin_init_params(paddle_pserver_client* client); +int paddle_begin_init_params(paddle_pserver_client client); /** * @brief paddle_init_param initializes the parameter on parameter @@ -109,7 +120,7 @@ int paddle_begin_init_params(paddle_pserver_client* client); * @paddle_begin_init_param). Or simply exit the program and wait for * the cluster management system to restart the trainer. */ -int paddle_init_param(paddle_pserver_client* client, paddle_parameter param, const unsigned char* param_config_proto, int config_len); +int paddle_init_param(paddle_pserver_client client, paddle_parameter param, const unsigned char* param_config_proto, int config_len); /** * @brief paddle_finish_init_params tells parameter servers client has @@ -120,7 +131,7 @@ int paddle_init_param(paddle_pserver_client* client, paddle_parameter param, con * @paddle_begin_init_param). Or simply exit the program and wait for * the cluster management system to restart the trainer. 
*/ -int paddle_finish_init_params(paddle_pserver_client* client); +int paddle_finish_init_params(paddle_pserver_client client); /** * @brief paddle_send_grads sends gradients to parameter servers for * @@ -131,7 +142,7 @@ int paddle_finish_init_params(paddle_pserver_client* client); * @param learning_rate the learning rate for the gradients. * @return 0 if successful, otherwise -1. */ -int paddle_send_grads(paddle_pserver_client* client, const paddle_gradient* grads, int len); +int paddle_send_grads(paddle_pserver_client client, const paddle_gradient* grads, int len); /** * @brief paddle_get_params gets parameters from parameter servers. * @@ -139,13 +150,15 @@ int paddle_send_grads(paddle_pserver_client* client, const paddle_gradient* grad * paddle_get_params will block until parameters are initialized on * the parameter servers. * - * @param names the array of names of the parameters to get. - * @param dst the destination array of parameters to save to. + * @param dst the destination array of parameter pointers to save to. + * Each parameter pointer must be pre-populated with the required parameter name, + * and the parameter content must be pre-allocated to the size of the corresponding + * parameter on the pserver. * @param len the length of the names array and the paddle_parameter * array. * @return 0 if successful, otherwise -1. */ -int paddle_get_params(paddle_pserver_client* client, const char** names, paddle_parameter* dst, int len); +int paddle_get_params(paddle_pserver_client client, paddle_parameter** dst, int len); /** * @brief paddle_save_model indicates parameters to save the parameter * @@ -154,5 +167,5 @@ int paddle_get_params(paddle_pserver_client* client, const char** names, paddle_ * @param path the path to save parameters. * @return 0 if successful, otherwise -1. */ -int paddle_save_model(paddle_pserver_client* client, const char* path); +int paddle_save_model(paddle_pserver_client client, const char* path); ``` diff --git a/doc/design/cluster_train/remote_parameter_updater.md b/doc/design/cluster_train/remote_parameter_updater.md new file mode 100644 index 0000000000000000000000000000000000000000..6e8e5938455b869e0f3367794c41250340b37f77 --- /dev/null +++ b/doc/design/cluster_train/remote_parameter_updater.md @@ -0,0 +1,21 @@ +# Design Doc: Remote Parameter Updater for Cluster Train + +For an overview of distributed training, please refer to the [distributed training design doc](README.md). In this design doc, we discuss the parameter updater that uses the parameter server cclient ([The Client Library of Parameter Server Design Doc](pserver_client.md)) to manage and update parameters. + +## Parameter Updater + +The parameter updater is used by the trainer to manage and update parameters. There are two kinds of parameter updaters: local and remote. Since this design is for cluster training, we only discuss the remote parameter updater here. + +### Remote Parameter Updater + +The remote parameter updater manages parameters on remote parameter servers through the client that communicates with the pservers ([The Client Library of Parameter Server Design Doc](pserver_client.md)). + +In the PaddlePaddle Python V2 API, the trainer is implemented in Python; it holds an instance of the parameter updater and calls its functions directly. In this design, we will also expose the API of RemoteParameterUpdater to Python with SWIG. + +#### Sparse Remote Parameter Updater + +Since we only implement dense parameter management for now, the mechanism for sparse parameters will be discussed in a later stage. 
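A rough sketch of the dense update flow against the cclient API as declared in pserver_client.md above (not part of this patch): the header name, the parameter name "w0", the buffer sizes, and the float32 element-type constant are assumptions for illustration only.

```c
/* Sketch of one trainer-side update step, assuming the cgo-generated
 * header from go/pserver/cclient and a float32 element-type constant. */
#include <stdlib.h>
#include "libpaddle_pserver_cclient.h"

int update_once(char* pserver_addrs, int selected,
                unsigned char* grad_buf, int buf_len,
                const unsigned char* param_config, int config_len) {
  paddle_pserver_client c = paddle_new_pserver_client(pserver_addrs, selected);
  paddle_parameter param, out;
  paddle_parameter* dst[1];
  paddle_gradient grad;

  /* Only the selected trainer initializes parameters on the pservers;
   * the others block in paddle_get_params until initialization is done. */
  if (paddle_begin_init_params(c)) {
    param.name = "w0";
    param.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
    param.content = (unsigned char*)calloc(buf_len, 1); /* zero initial value */
    param.content_len = buf_len;
    if (paddle_init_param(c, param, param_config, config_len) != 0) return -1;
    free(param.content);
    if (paddle_finish_init_params(c) != 0) return -1;
  }

  /* Send one gradient; the pservers apply the optimizer update. */
  grad.name = "w0";
  grad.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
  grad.content = grad_buf;
  grad.content_len = buf_len;
  if (paddle_send_grads(c, &grad, 1) != 0) return -1;

  /* Fetch the updated parameter: per the API above, each destination must be
   * pre-populated with the name and a pre-allocated content buffer. */
  out.name = "w0";
  out.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
  out.content = (unsigned char*)malloc(buf_len);
  out.content_len = buf_len;
  dst[0] = &out;
  if (paddle_get_params(c, dst, 1) != 0) return -1;

  free(out.content);
  paddle_pserver_client_release(c);
  return 0;
}
```

With etcd-based coordination, `paddle_new_etcd_pserver_client(etcd_addr)` would replace the explicit pserver address list.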
+ +### Interface Design + +TBD diff --git a/doc/getstarted/build_and_install/build_from_source_en.md b/doc/getstarted/build_and_install/build_from_source_en.md index 69f4501f370dcc9d603ec54a63d68568d66e832e..c0608ede8e57b224dae4b3d510d704a8b0918b53 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.md +++ b/doc/getstarted/build_and_install/build_from_source_en.md @@ -22,6 +22,7 @@ To compile the source code, your computer must be equipped with the following de - **CMake**: CMake >= 3.0 (at least CMake 3.4 on Mac OS X) - **BLAS**: MKL, OpenBlas or ATLAS - **Python**: only support Python 2.7 +- **Go** **Note:** For CUDA 7.0 and CUDA 7.5, GCC 5.0 and up are not supported! For CUDA 8.0, GCC versions later than 5.3 are not supported! @@ -107,6 +108,18 @@ As a simple example, consider the following: sudo apt-get install -y python python-pip python-numpy libpython-dev bison sudo pip install 'protobuf==3.1.0.post1' + # Install Go + # You can follow https://golang.org/doc/install for a detailed explanation. + wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \ + tar -C $HOME -xzf go.tgz && \ + mkdir $HOME/gopath && \ + rm go.tgz + + # Setup environment variables + export GOROOT=$HOME/go + export GOPATH=$HOME/gopath + export PATH=$PATH:$GOROOT/bin + # install cmake 3.4 curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \ cd cmake-3.4.1 && ./bootstrap && make -j4 && sudo make install && \ diff --git a/doc/howto/deep_model/rnn/index_cn.rst b/doc/howto/deep_model/rnn/index_cn.rst index 9e805ca85191b793c8798a239927a318c70b96f5..9ecab5594cff47cde4700b7ce0f58013a960a16e 100644 --- a/doc/howto/deep_model/rnn/index_cn.rst +++ b/doc/howto/deep_model/rnn/index_cn.rst @@ -4,6 +4,7 @@ RNN相关模型 .. toctree:: :maxdepth: 1 + rnn_config_cn.rst recurrent_group_cn.md hierarchical_layer_cn.rst hrnn_rnn_api_compare_cn.rst diff --git a/doc/howto/deep_model/rnn/index_en.rst b/doc/howto/deep_model/rnn/index_en.rst index 13a153b05c578e0af82ee29db5ea27fd4b6d6f59..7adc79873d699fdfd5a85034bcef964dd1f19132 100644 --- a/doc/howto/deep_model/rnn/index_en.rst +++ b/doc/howto/deep_model/rnn/index_en.rst @@ -1,2 +1,7 @@ RNN Models ========== + +.. toctree:: + :maxdepth: 1 + + rnn_config_en.rst diff --git a/doc/howto/deep_model/rnn/rnn_config_cn.rst b/doc/howto/deep_model/rnn/rnn_config_cn.rst index ac2bd0775f4ab2e0a0c37462e2c23001123b152b..4d684cf8ad5a8082cf31fb27027119b3d3e700b6 100644 --- a/doc/howto/deep_model/rnn/rnn_config_cn.rst +++ b/doc/howto/deep_model/rnn/rnn_config_cn.rst @@ -5,36 +5,13 @@ RNN配置 中配置循环神经网络(RNN)。PaddlePaddle 高度支持灵活和高效的循环神经网络配置。 在本教程中,您将了解如何: -- 准备用来学习循环神经网络的序列数据。 - 配置循环神经网络架构。 - 使用学习完成的循环神经网络模型生成序列。 我们将使用 vanilla 循环神经网络和 sequence to sequence 模型来指导你完成这些步骤。sequence to sequence -模型的代码可以在\ ``demo / seqToseq``\ 找到。 - -准备序列数据 ------------- - -PaddlePaddle -不需要对序列数据进行任何预处理,例如填充。唯一需要做的是将相应类型设置为输入。例如,以下代码段定义了三个输入。 -它们都是序列,它们的大小是\ ``src_dict``\ ,\ ``trg_dict``\ 和\ ``trg_dict``\ : - -.. code:: python - - settings.input_types = [ - integer_value_sequence(len(settings.src_dict)), - integer_value_sequence(len(settings.trg_dict)), - integer_value_sequence(len(settings.trg_dict))] - -在\ ``process``\ 函数中,每个\ ``yield``\ 函数将返回三个整数列表。每个整数列表被视为一个整数序列: - -.. 
code:: python - - yield src_ids, trg_ids, trg_ids_next - -有关如何编写数据提供程序的更多细节描述,请参考 :ref:`api_pydataprovider2` 。完整的数据提供文件在 -``demo/seqToseq/dataprovider.py``\ 。 +模型的代码可以在 `book/08.machine_translation `_ 找到。 +wmt14数据的提供文件在 `python/paddle/v2/dataset/wmt14.py `_ 。 配置循环神经网络架构 -------------------- @@ -85,19 +62,19 @@ vanilla act=None, rnn_layer_attr=None): def __rnn_step__(ipt): - out_mem = memory(name=name, size=size) - rnn_out = mixed_layer(input = [full_matrix_projection(ipt), - full_matrix_projection(out_mem)], - name = name, - bias_attr = rnn_bias_attr, - act = act, - layer_attr = rnn_layer_attr, - size = size) + out_mem = paddle.layer.memory(name=name, size=size) + rnn_out = paddle.layer.mixed(input = [paddle.layer.full_matrix_projection(input=ipt), + paddle.layer.full_matrix_projection(input=out_mem)], + name = name, + bias_attr = rnn_bias_attr, + act = act, + layer_attr = rnn_layer_attr, + size = size) return rnn_out - return recurrent_group(name='%s_recurrent_group' % name, - step=__rnn_step__, - reverse=reverse, - input=input) + return paddle.layer.recurrent_group(name='%s_recurrent_group' % name, + step=__rnn_step__, + reverse=reverse, + input=input) PaddlePaddle 使用“Memory”(记忆模块)实现单步函数。\ **Memory**\ 是在PaddlePaddle中构造循环神经网络时最重要的概念。 @@ -140,43 +117,52 @@ Sequence to Sequence Model with Attention .. code:: python # 定义源语句的数据层 - src_word_id = data_layer(name='source_language_word', size=source_dict_dim) + src_word_id = paddle.layer.data( + name='source_language_word', + type=paddle.data_type.integer_value_sequence(source_dict_dim)) # 计算每个词的词向量 - src_embedding = embedding_layer( + src_embedding = paddle.layer.embedding( input=src_word_id, size=word_vector_dim, - param_attr=ParamAttr(name='_source_language_embedding')) + param_attr=paddle.attr.ParamAttr(name='_source_language_embedding')) # 应用前向循环神经网络 - src_forward = grumemory(input=src_embedding, size=encoder_size) + src_forward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size) # 应用反向递归神经网络(reverse=True表示反向循环神经网络) - src_backward = grumemory(input=src_embedding, - size=encoder_size, - reverse=True) + src_backward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size, reverse=True) # 将循环神经网络的前向和反向部分混合在一起 - encoded_vector = concat_layer(input=[src_forward, src_backward]) + encoded_vector = paddle.layer.concat(input=[src_forward, src_backward]) # 投射编码向量到 decoder_size - encoder_proj = mixed_layer(input = [full_matrix_projection(encoded_vector)], - size = decoder_size) + encoded_proj = paddle.layer.mixed( + size=decoder_size, + input=paddle.layer.full_matrix_projection(encoded_vector)) # 计算反向RNN的第一个实例 - backward_first = first_seq(input=src_backward) + backward_first = paddle.layer.first_seq(input=src_backward) # 投射反向RNN的第一个实例到 decoder size - decoder_boot = mixed_layer(input=[full_matrix_projection(backward_first)], size=decoder_size, act=TanhActivation()) + decoder_boot = paddle.layer.mixed( + size=decoder_size, + act=paddle.activation.Tanh(), + input=paddle.layer.full_matrix_projection(backward_first)) 解码器使用 ``recurrent_group`` 来定义循环神经网络。单步函数和输出函数在 ``gru_decoder_with_attention`` 中定义: .. 
code:: python - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] - trg_embedding = embedding_layer( - input=data_layer(name='target_language_word', - size=target_dict_dim), - size=word_vector_dim, - param_attr=ParamAttr(name='_target_language_embedding')) + group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) + group_input2 = paddle.layer.StaticInput(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] + trg_embedding = paddle.layer.embedding( + input=paddle.layer.data( + name='target_language_word', + type=paddle.data_type.integer_value_sequence(target_dict_dim)), + size=word_vector_dim, + param_attr=paddle.attr.ParamAttr(name='_target_language_embedding')) + group_inputs.append(trg_embedding) group_inputs.append(trg_embedding) # 对于配备有注意力机制的解码器,在训练中, @@ -185,9 +171,10 @@ Sequence to Sequence Model with Attention # StaticInput 意味着不同时间步的输入都是相同的值, # 否则它以一个序列输入,不同时间步的输入是不同的。 # 所有输入序列应该有相同的长度。 - decoder = recurrent_group(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs) + decoder = paddle.layer.recurrent_group( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) 单步函数的实现如下所示。首先,它定义解码网络的\ **Memory**\ 。然后定义 attention,门控循环单元单步函数和输出函数: @@ -198,27 +185,32 @@ attention,门控循环单元单步函数和输出函数: # 定义解码器的Memory # Memory的输出定义在 gru_step 内 # 注意 gru_step 应该与它的Memory名字相同 - decoder_mem = memory(name='gru_decoder', - size=decoder_size, - boot_layer=decoder_boot) + decoder_mem = paddle.layer.memory( + name='gru_decoder', size=decoder_size, boot_layer=decoder_boot) # 计算 attention 加权编码向量 - context = simple_attention(encoded_sequence=enc_vec, - encoded_proj=enc_proj, - decoder_state=decoder_mem) + context = paddle.networks.simple_attention( + encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem) # 混合当前词向量和attention加权编码向量 - decoder_inputs = mixed_layer(inputs = [full_matrix_projection(context), - full_matrix_projection(current_word)], - size = decoder_size * 3) + decoder_inputs = paddle.layer.mixed( + size=decoder_size * 3, + input=[ + paddle.layer.full_matrix_projection(input=context), + paddle.layer.full_matrix_projection(input=current_word) + ]) # 定义门控循环单元循环神经网络单步函数 - gru_step = gru_step_layer(name='gru_decoder', - input=decoder_inputs, - output_mem=decoder_mem, - size=decoder_size) + gru_step = paddle.layer.gru_step( + name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) # 定义输出函数 - out = mixed_layer(input=[full_matrix_projection(input=gru_step)], - size=target_dict_dim, - bias_attr=True, - act=SoftmaxActivation()) + out = paddle.layer.mixed( + size=target_dict_dim, + bias_attr=True, + act=paddle.activation.Softmax(), + input=paddle.layer.full_matrix_projection(input=gru_step)) return out 生成序列 @@ -238,41 +230,32 @@ attention,门控循环单元单步函数和输出函数: - ``beam_size``: beam search 算法中的beam大小。 - ``max_length``: 生成序列的最大长度。 -- 使用 ``seqtext_printer_evaluator`` - 根据索引矩阵和字典打印文本。这个函数需要设置: - - - ``id_input``: 数据的整数ID,用于标识生成的文件中的相应输出。 - - ``dict_file``: 用于将词ID转换为词的字典文件。 - - ``result_file``: 生成结果文件的路径。 - 代码如下: .. 
code:: python - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] + group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) + group_input2 = paddle.layer.StaticInput(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] # 在生成时,解码器基于编码源序列和最后生成的目标词预测下一目标词。 # 编码源序列(编码器输出)必须由只读Memory的 StaticInput 指定。 # 这里, GeneratedInputs 自动获取上一个生成的词,并在最开始初始化为起始词,如 。 - trg_embedding = GeneratedInput( - size=target_dict_dim, - embedding_name='_target_language_embedding', - embedding_size=word_vector_dim) + trg_embedding = paddle.layer.GeneratedInput( + size=target_dict_dim, + embedding_name='_target_language_embedding', + embedding_size=word_vector_dim) group_inputs.append(trg_embedding) - beam_gen = beam_search(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs, - bos_id=0, # Beginnning token. - eos_id=1, # End of sentence token. - beam_size=beam_size, - max_length=max_length) - - seqtext_printer_evaluator(input=beam_gen, - id_input=data_layer(name="sent_id", size=1), - dict_file=trg_dict_path, - result_file=gen_trans_file) - outputs(beam_gen) - -注意,这种生成技术只用于类似解码器的生成过程。如果你正在处理序列标记任务,请参阅 :ref:`semantic_role_labeling` 了解更多详细信息。 - -完整的配置文件在\ ``demo/seqToseq/seqToseq_net.py``\ 。 + beam_gen = paddle.layer.beam_search( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs, + bos_id=0, # Beginnning token. + eos_id=1, # End of sentence token. + beam_size=beam_size, + max_length=max_length) + + return beam_gen + +注意,这种生成技术只用于类似解码器的生成过程。如果你正在处理序列标记任务,请参阅 `book/06.understand_sentiment `_ 了解更多详细信息。 + +完整的配置文件在 `book/08.machine_translation/train.py `_ 。 diff --git a/doc/howto/deep_model/rnn/rnn_config_en.rst b/doc/howto/deep_model/rnn/rnn_config_en.rst index 73f5d5371fcd3ce95253cad47b0d8e738284441c..2b581290a41005c04cb1d8b6febe57f17d2416d3 100644 --- a/doc/howto/deep_model/rnn/rnn_config_en.rst +++ b/doc/howto/deep_model/rnn/rnn_config_en.rst @@ -3,34 +3,11 @@ RNN Configuration This tutorial will guide you how to configure recurrent neural network in PaddlePaddle. PaddlePaddle supports highly flexible and efficient recurrent neural network configuration. In this tutorial, you will learn how to: -- prepare sequence data for learning recurrent neural networks. - configure recurrent neural network architecture. - generate sequence with learned recurrent neural network models. -We will use vanilla recurrent neural network, and sequence to sequence model to guide you through these steps. The code of sequence to sequence model can be found at :code:`demo/seqToseq`. - -===================== -Prepare Sequence Data -===================== - -PaddlePaddle does not need any preprocessing to sequence data, such as padding. The only thing that needs to be done is to set the type of the corresponding type to input. For example, the following code snippets defines three input. All of them are sequences, and the size of them are :code:`src_dict`, :code:`trg_dict`, and :code:`trg_dict`: - -.. code-block:: python - - settings.input_types = [ - integer_value_sequence(len(settings.src_dict)), - integer_value_sequence(len(settings.trg_dict)), - integer_value_sequence(len(settings.trg_dict))] - - -Then at the :code:`process` function, each :code:`yield` function will return three integer lists. Each integer list is treated as a sequence of integers: - -.. 
code-block:: python - - yield src_ids, trg_ids, trg_ids_next - - -For more details description of how to write a data provider, please refer to :ref:`api_pydataprovider2` . The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`. +We will use vanilla recurrent neural network, and sequence to sequence model to guide you through these steps. The code of sequence to sequence model can be found at `book/08.machine_translation `_ . +And the data preparation of this model can be found at `python/paddle/v2/dataset/wmt14.py `_ =============================================== Configure Recurrent Neural Network Architecture @@ -75,19 +52,19 @@ Its **output function** simply takes :math:`x_t` as the output. act=None, rnn_layer_attr=None): def __rnn_step__(ipt): - out_mem = memory(name=name, size=size) - rnn_out = mixed_layer(input = [full_matrix_projection(ipt), - full_matrix_projection(out_mem)], - name = name, - bias_attr = rnn_bias_attr, - act = act, - layer_attr = rnn_layer_attr, - size = size) + out_mem = paddle.layer.memory(name=name, size=size) + rnn_out = paddle.layer.mixed(input = [paddle.layer.full_matrix_projection(input=ipt), + paddle.layer.full_matrix_projection(input=out_mem)], + name = name, + bias_attr = rnn_bias_attr, + act = act, + layer_attr = rnn_layer_attr, + size = size) return rnn_out - return recurrent_group(name='%s_recurrent_group' % name, - step=__rnn_step__, - reverse=reverse, - input=input) + return paddle.layer.recurrent_group(name='%s_recurrent_group' % name, + step=__rnn_step__, + reverse=reverse, + input=input) PaddlePaddle uses memory to construct step function. **Memory** is the most important concept when constructing recurrent neural networks in PaddlePaddle. A memory is a state that is used recurrently in step functions, such as :math:`x_{t+1} = f_x(x_t)`. One memory contains an **output** and a **input**. The output of memory at the current time step is utilized as the input of the memory at the next time step. A memory can also has a **boot layer**, whose output is utilized as the initial value of the memory. In our case, the output of the gated recurrent unit is employed as the output memory. Notice that the name of the layer :code:`rnn_out` is the same as the name of :code:`out_mem`. This means the output of the layer :code:`rnn_out` (:math:`x_{t+1}`) is utilized as the **output** of :code:`out_mem` memory. @@ -113,43 +90,52 @@ We also project the encoder vector to :code:`decoder_size` dimensional space, ge .. code-block:: python # Define the data layer of the source sentence. - src_word_id = data_layer(name='source_language_word', size=source_dict_dim) + src_word_id = paddle.layer.data( + name='source_language_word', + type=paddle.data_type.integer_value_sequence(source_dict_dim)) # Calculate the word embedding of each word. - src_embedding = embedding_layer( + src_embedding = paddle.layer.embedding( input=src_word_id, size=word_vector_dim, - param_attr=ParamAttr(name='_source_language_embedding')) + param_attr=paddle.attr.ParamAttr(name='_source_language_embedding')) # Apply forward recurrent neural network. - src_forward = grumemory(input=src_embedding, size=encoder_size) + src_forward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size) # Apply backward recurrent neural network. reverse=True means backward recurrent neural network. 
- src_backward = grumemory(input=src_embedding, - size=encoder_size, - reverse=True) + src_backward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size, reverse=True) # Mix the forward and backward parts of the recurrent neural network together. - encoded_vector = concat_layer(input=[src_forward, src_backward]) + encoded_vector = paddle.layer.concat(input=[src_forward, src_backward]) # Project encoding vector to decoder_size. - encoder_proj = mixed_layer(input = [full_matrix_projection(encoded_vector)], - size = decoder_size) + encoded_proj = paddle.layer.mixed( + size=decoder_size, + input=paddle.layer.full_matrix_projection(encoded_vector)) # Compute the first instance of the backward RNN. - backward_first = first_seq(input=src_backward) + backward_first = paddle.layer.first_seq(input=src_backward) # Project the first instance of backward RNN to decoder size. - decoder_boot = mixed_layer(input=[full_matrix_projection(backward_first)], size=decoder_size, act=TanhActivation()) + decoder_boot = paddle.layer.mixed( + size=decoder_size, + act=paddle.activation.Tanh(), + input=paddle.layer.full_matrix_projection(backward_first)) The decoder uses :code:`recurrent_group` to define the recurrent neural network. The step and output functions are defined in :code:`gru_decoder_with_attention`: .. code-block:: python - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] - trg_embedding = embedding_layer( - input=data_layer(name='target_language_word', - size=target_dict_dim), - size=word_vector_dim, - param_attr=ParamAttr(name='_target_language_embedding')) + group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) + group_input2 = paddle.layer.StaticInput(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] + trg_embedding = paddle.layer.embedding( + input=paddle.layer.data( + name='target_language_word', + type=paddle.data_type.integer_value_sequence(target_dict_dim)), + size=word_vector_dim, + param_attr=paddle.attr.ParamAttr(name='_target_language_embedding')) + group_inputs.append(trg_embedding) group_inputs.append(trg_embedding) # For decoder equipped with attention mechanism, in training, @@ -158,9 +144,10 @@ The decoder uses :code:`recurrent_group` to define the recurrent neural network. # StaticInput means the same value is utilized at different time steps. # Otherwise, it is a sequence input. Inputs at different time steps are different. # All sequence inputs should have the same length. - decoder = recurrent_group(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs) + decoder = paddle.layer.recurrent_group( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) The implementation of the step function is listed as below. First, it defines the **memory** of the decoder network. Then it defines attention, gated recurrent unit step function, and the output function: @@ -171,27 +158,32 @@ The implementation of the step function is listed as below. First, it defines th # Defines the memory of the decoder. # The output of this memory is defined in gru_step. # Notice that the name of gru_step should be the same as the name of this memory. - decoder_mem = memory(name='gru_decoder', - size=decoder_size, - boot_layer=decoder_boot) + decoder_mem = paddle.layer.memory( + name='gru_decoder', size=decoder_size, boot_layer=decoder_boot) # Compute attention weighted encoder vector. 
- context = simple_attention(encoded_sequence=enc_vec, - encoded_proj=enc_proj, - decoder_state=decoder_mem) + context = paddle.networks.simple_attention( + encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem) # Mix the current word embedding and the attention weighted encoder vector. - decoder_inputs = mixed_layer(inputs = [full_matrix_projection(context), - full_matrix_projection(current_word)], - size = decoder_size * 3) + decoder_inputs = paddle.layer.mixed( + size=decoder_size * 3, + input=[ + paddle.layer.full_matrix_projection(input=context), + paddle.layer.full_matrix_projection(input=current_word) + ]) # Define Gated recurrent unit recurrent neural network step function. - gru_step = gru_step_layer(name='gru_decoder', - input=decoder_inputs, - output_mem=decoder_mem, - size=decoder_size) + gru_step = paddle.layer.gru_step( + name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) # Defines the output function. - out = mixed_layer(input=[full_matrix_projection(input=gru_step)], - size=target_dict_dim, - bias_attr=True, - act=SoftmaxActivation()) + out = paddle.layer.mixed( + size=target_dict_dim, + bias_attr=True, + act=paddle.activation.Softmax(), + input=paddle.layer.full_matrix_projection(input=gru_step)) return out @@ -207,45 +199,37 @@ After training the model, we can use it to generate sequences. A common practice - :code:`eos_id`: the end token. Every sentence ends with the end token. - :code:`beam_size`: the beam size used in beam search. - :code:`max_length`: the maximum length of the generated sentences. - -* use :code:`seqtext_printer_evaluator` to print text according to index matrix and dictionary. This function needs to set: - - - :code:`id_input`: the integer ID of the data, used to identify the corresponding output in the generated files. - - :code:`dict_file`: the dictionary file for converting word id to word. - - :code:`result_file`: the path of the generation result file. The code is listed below: .. code-block:: python - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] + group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) + group_input2 = paddle.layer.StaticInput(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] # In generation, decoder predicts a next target word based on # the encoded source sequence and the last generated target word. # The encoded source sequence (encoder's output) must be specified by # StaticInput which is a read-only memory. # Here, GeneratedInputs automatically fetchs the last generated word, # which is initialized by a start mark, such as . - trg_embedding = GeneratedInput( - size=target_dict_dim, - embedding_name='_target_language_embedding', - embedding_size=word_vector_dim) + trg_embedding = paddle.layer.GeneratedInput( + size=target_dict_dim, + embedding_name='_target_language_embedding', + embedding_size=word_vector_dim) group_inputs.append(trg_embedding) - beam_gen = beam_search(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs, - bos_id=0, # Beginnning token. - eos_id=1, # End of sentence token. - beam_size=beam_size, - max_length=max_length) + beam_gen = paddle.layer.beam_search( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs, + bos_id=0, # Beginnning token. + eos_id=1, # End of sentence token. 
+ beam_size=beam_size, + max_length=max_length) - seqtext_printer_evaluator(input=beam_gen, - id_input=data_layer(name="sent_id", size=1), - dict_file=trg_dict_path, - result_file=gen_trans_file) - outputs(beam_gen) + return beam_gen -Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling` for more details. +Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to `book/06.understand_sentiment `_ for more details. -The full configuration file is located at :code:`demo/seqToseq/seqToseq_net.py`. +The full configuration file is located at `book/08.machine_translation/train.py `_ . diff --git a/go/cmake/golang.cmake b/go/cmake/golang.cmake index d38d06de2348821b21109f7dc708314da81111c5..a5a43886f887e495500fa26b3c26fa69c63eded0 100644 --- a/go/cmake/golang.cmake +++ b/go/cmake/golang.cmake @@ -17,7 +17,7 @@ function(GO_LIBRARY NAME BUILD_TYPE) endif() file(GLOB GO_SOURCE RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.go") - file(RELATIVE_PATH rel ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) + file(RELATIVE_PATH rel ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) # find Paddle directory. get_filename_component(PARENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) @@ -26,25 +26,23 @@ function(GO_LIBRARY NAME BUILD_TYPE) # automatically get all dependencies specified in the source code # for given target. - add_custom_target(goGet env GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} get -d ${rel}/...) + add_custom_target(${NAME}_goGet env GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} get -d ${rel}/...) # make a symlink that references Paddle inside $GOPATH, so go get # will use the local changes in Paddle rather than checkout Paddle # in github. 
- add_custom_target(copyPaddle - COMMAND ln -sf ${PADDLE_DIR} ${PADDLE_IN_GOPATH}) - add_dependencies(goGet copyPaddle) + add_custom_target(${NAME}_copyPaddle + COMMAND rm -rf ${PADDLE_IN_GOPATH}/Paddle + COMMAND ln -sf ${PADDLE_DIR} ${PADDLE_IN_GOPATH}/Paddle) + add_dependencies(${NAME}_goGet ${NAME}_copyPaddle) add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp COMMAND env GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} build ${BUILD_MODE} - -o "${CMAKE_CURRENT_BINARY_DIR}/${LIB_NAME}" + -o "${CMAKE_CURRENT_BINARY_DIR}/${LIB_NAME}" ${CMAKE_GO_FLAGS} ${GO_SOURCE} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) add_custom_target(${NAME} ALL DEPENDS ${OUTPUT_DIR}/.timestamp ${ARGN}) - add_dependencies(${NAME} goGet) + add_dependencies(${NAME} ${NAME}_goGet) - if(NOT BUILD_TYPE STREQUAL "STATIC") - install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${LIB_NAME} DESTINATION bin) - endif() endfunction(GO_LIBRARY) diff --git a/go/cmd/master/master.go b/go/cmd/master/master.go index d1f3d7d76c438670faf6677b01e790c5ebe1f2cb..25cd1cafcdf328094a019638f37f908591f5f374 100644 --- a/go/cmd/master/master.go +++ b/go/cmd/master/master.go @@ -1,80 +1,32 @@ package main import ( - "fmt" "net" "net/http" "net/rpc" - "os" - "path/filepath" "strconv" - "strings" "time" "github.com/namsral/flag" "github.com/PaddlePaddle/Paddle/go/master" - "github.com/PaddlePaddle/recordio" ) func main() { port := flag.Int("port", 8080, "port of the master server.") - dataset := flag.String("training_dataset", "", "dataset: comma separated path to RecordIO paths, supports golb patterns.") + faultTolerance := flag.Bool("fault_tolerance", false, "enable fault tolerance (requires etcd).") taskTimeoutDur := flag.Duration("task_timout_dur", 20*time.Minute, "task timout duration.") taskTimeoutMax := flag.Int("task_timeout_max", 3, "max timtout count for each task before it being declared failed task.") chunkPerTask := flag.Int("chunk_per_task", 10, "chunk per task.") flag.Parse() - if *dataset == "" { - panic("no dataset specified.") - } - if *faultTolerance { panic("fault tolernance not implemented.") - } - - var chunks []master.Chunk - var paths []string - ss := strings.Split(*dataset, ",") - fmt.Println(ss) - for _, s := range ss { - match, err := filepath.Glob(s) - if err != nil { - panic(err) - } - paths = append(paths, match...) - } - - if len(paths) == 0 { - panic("no valid datset specified.") - } - - idx := 0 - for _, path := range paths { - f, err := os.Open(path) - if err != nil { - panic(err) - } - - index, err := recordio.LoadIndex(f) - if err != nil { - panic(err) - } - f.Close() - count := index.NumChunks() - for i := 0; i < count; i++ { - chunk := master.Chunk{ - Idx: idx, - Path: path, - Index: *index.ChunkIndex(i), - } - chunks = append(chunks, chunk) - } } - s := master.NewService(chunks, *chunkPerTask, *taskTimeoutDur, *taskTimeoutMax) + s := master.NewService(*chunkPerTask, *taskTimeoutDur, *taskTimeoutMax) err := rpc.Register(s) if err != nil { panic(err) diff --git a/go/pserver/internal/connection/conn.go b/go/connection/conn.go similarity index 81% rename from go/pserver/internal/connection/conn.go rename to go/connection/conn.go index 1c04f117254054741b7d45fb16462b5ce84a2aea..977e8cc123707dbcf055bb77399adbc232c575a0 100644 --- a/go/pserver/internal/connection/conn.go +++ b/go/connection/conn.go @@ -4,6 +4,8 @@ import ( "errors" "net/rpc" "sync" + + log "github.com/sirupsen/logrus" ) // TODO(helin): add TCP re-connect logic @@ -21,6 +23,18 @@ func New() *Conn { return c } +// Close closes the connection. 
+func (c *Conn) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.client == nil { + return nil + } + + return c.client.Close() +} + // Connect connects the connection to a address. func (c *Conn) Connect(addr string) error { c.mu.Lock() @@ -50,12 +64,20 @@ func (c *Conn) Connect(addr string) error { c.waitConn = nil } } else { + err := client.Close() + if err != nil { + log.Errorln(err) + } + return errors.New("client already set from a concurrent goroutine") } return nil } +// TODO(helin): refactor Call to be able to perform given retry +// policy. + // Call make a RPC call. // // Call will be blocked until the connection to remote RPC service diff --git a/go/master/c/CMakeLists.txt b/go/master/c/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..acce698051ec7217d60a40b3d9cdc98fb1499653 --- /dev/null +++ b/go/master/c/CMakeLists.txt @@ -0,0 +1,21 @@ +cmake_minimum_required(VERSION 3.0) + +get_filename_component(PARENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(PARENT_DIR ${PARENT_DIR} DIRECTORY) +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PARENT_DIR}/cmake") + +project(cxx_go C Go) + +include(golang) +include(flags) + +set(MASTER_LIB_NAME "paddle_master") +go_library(${MASTER_LIB_NAME} SHARED) + +if(PROJ_ROOT) + add_custom_command(OUTPUT ${PROJ_ROOT}/python/paddle/v2/master/lib${MASTER_LIB_NAME}.so + COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/lib${MASTER_LIB_NAME}.h + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/lib${MASTER_LIB_NAME}.so ${PROJ_ROOT}/python/paddle/v2/master/ + DEPENDS ${MASTER_LIB_NAME}) + add_custom_target(paddle_master_shared ALL DEPENDS ${PROJ_ROOT}/python/paddle/v2/master/lib${MASTER_LIB_NAME}.so) +endif(PROJ_ROOT) diff --git a/go/master/c/client.go b/go/master/c/client.go new file mode 100644 index 0000000000000000000000000000000000000000..b186474dc33138aeb02a2ffe34418b379b7a2db0 --- /dev/null +++ b/go/master/c/client.go @@ -0,0 +1,110 @@ +package main + +/* +#include +#include +#include + +#define PADDLE_MASTER_OK 0 +#define PADDLE_MASTER_ERROR -1 + +typedef int paddle_master_client; +*/ +import "C" + +import ( + "sync" + "unsafe" + + "github.com/PaddlePaddle/Paddle/go/master" + log "github.com/sirupsen/logrus" +) + +var nullPtr = unsafe.Pointer(uintptr(0)) +var mu sync.Mutex +var handleMap = make(map[C.paddle_master_client]*master.Client) +var curHandle C.paddle_master_client + +func add(c *master.Client) C.paddle_master_client { + mu.Lock() + defer mu.Unlock() + client := curHandle + curHandle++ + handleMap[client] = c + return client +} + +func get(client C.paddle_master_client) *master.Client { + mu.Lock() + defer mu.Unlock() + return handleMap[client] +} + +func remove(client C.paddle_master_client) *master.Client { + mu.Lock() + defer mu.Unlock() + h := handleMap[client] + delete(handleMap, client) + return h +} + +type addresser string + +func (a addresser) Address() string { + return string(a) +} + +//export paddle_new_master_client +func paddle_new_master_client(addr *C.char, bufSize int) C.paddle_master_client { + a := C.GoString(addr) + c := master.NewClient(addresser(a), bufSize) + return add(c) +} + +//export paddle_release_master_client +func paddle_release_master_client(client C.paddle_master_client) { + remove(client) +} + +//export paddle_set_dataset +func paddle_set_dataset(client C.paddle_master_client, path **C.char, size C.int) C.int { + c := get(client) + var paths []string + for i := 0; i < int(size); i++ { + ptr := (**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(path)) + 
uintptr(i)*unsafe.Sizeof(*path))) + str := C.GoString(*ptr) + paths = append(paths, str) + } + err := c.SetDataset(paths) + if err != nil { + log.Errorln(err) + return C.PADDLE_MASTER_ERROR + } + + return C.PADDLE_MASTER_OK +} + +//export paddle_next_record +func paddle_next_record(client C.paddle_master_client, record **C.uchar) C.int { + c := get(client) + r := c.NextRecord() + if len(r) == 0 { + *record = (*C.uchar)(nullPtr) + return 0 + } + + size := C.size_t(len(r)) + *record = (*C.uchar)(C.malloc(size)) + C.memcpy(unsafe.Pointer(*record), unsafe.Pointer(&r[0]), size) + return C.int(size) +} + +//export mem_free +func mem_free(p unsafe.Pointer) { + // "free" may be a better name for this function, but doing so + // will cause calling any function of this library from Python + // ctypes hanging. + C.free(p) +} + +func main() {} diff --git a/go/master/client.go b/go/master/client.go new file mode 100644 index 0000000000000000000000000000000000000000..8451820c1963dd5a4eff0c3ab7763eb6a8e05ba4 --- /dev/null +++ b/go/master/client.go @@ -0,0 +1,137 @@ +package master + +import ( + "os" + "time" + + "github.com/PaddlePaddle/Paddle/go/connection" + "github.com/PaddlePaddle/recordio" + log "github.com/sirupsen/logrus" +) + +// Addresser provide the address of the master server. +type Addresser interface { + Address() string +} + +// Client is the client of the master server. +type Client struct { + conn *connection.Conn + ch chan []byte +} + +// NewClient creates a new Client. +// +// bufSize is the record buffer size. NextRecord will read from this +// buffer. +func NewClient(addr Addresser, bufSize int) *Client { + c := &Client{} + c.conn = connection.New() + c.ch = make(chan []byte, bufSize) + go c.monitorMaster(addr) + go c.getRecords() + return c +} + +func (c *Client) getRecords() { + for { + t, err := c.getTask() + if err != nil { + // TODO(helin): wait before move on with next + // getTask call. + log.Errorln(err) + continue + } + + for _, chunk := range t.Chunks { + f, err := os.Open(chunk.Path) + if err != nil { + log.Errorln(err) + continue + } + + s := recordio.NewRangeScanner(f, &chunk.Index, -1, -1) + for s.Scan() { + c.ch <- s.Record() + } + + if s.Err() != nil { + log.Errorln(err, chunk.Path) + } + + err = f.Close() + if err != nil { + log.Errorln(err) + } + } + + // We treat a task as finished whenever the last data + // instance of the task is read. This is not exactly + // correct, but a reasonable approximation. + c.taskFinished(t.ID) + } +} + +func (c *Client) monitorMaster(addr Addresser) { + lastMaster := "" + monitor := func() { + // get the lastest address of the master server, + // connect to the new address once address changed. + curMaster := addr.Address() + if curMaster != lastMaster { + if curMaster == "" { + err := c.conn.Close() + if err != nil { + log.Errorln(err) + } + } else { + err := c.conn.Connect(curMaster) + if err != nil { + log.Errorln(err) + + // connect to addr failed, set + // to last known addr in order + // to retry next time. + curMaster = lastMaster + } + + } + } + + lastMaster = curMaster + } + + monitor() + ticker := time.NewTicker(10 * time.Second) + for _ = range ticker.C { + monitor() + } +} + +// SetDataset set dataset for the master server to dispatch. +// +// SetDataset can be call multiple times from different nodes. But +// only the first call will be honored. +func (c *Client) SetDataset(globPaths []string) error { + return c.conn.Call("Service.SetDataset", globPaths, nil) +} + +// getTask gets a new task from the master server. 
+func (c *Client) getTask() (Task, error) { + var t Task + err := c.conn.Call("Service.GetTask", 0, &t) + return t, err +} + +// TaskFinished tells the master server a task is finished. +func (c *Client) taskFinished(taskID int) error { + return c.conn.Call("Service.TaskFinished", taskID, nil) +} + +// NextRecord returns next record in the dataset. +// +// NextRecord will block until the next record is available. It is +// thread-safe. +func (c *Client) NextRecord() []byte { + return <-c.ch +} diff --git a/go/master/client_internal_test.go b/go/master/client_internal_test.go new file mode 100644 index 0000000000000000000000000000000000000000..00fcca0e2cf44d0f4855fd366a8f80895abf8865 --- /dev/null +++ b/go/master/client_internal_test.go @@ -0,0 +1,121 @@ +package master + +import ( + "fmt" + "net" + "net/http" + "net/rpc" + "os" + "strconv" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/PaddlePaddle/Paddle/go/connection" + "github.com/PaddlePaddle/recordio" +) + +const ( + totalTask = 20 + chunkPerTask = 10 +) + +func init() { + log.SetLevel(log.ErrorLevel) +} + +type TestAddresser string + +func (a TestAddresser) Address() string { + return string(a) +} + +func TestGetFinishTask(t *testing.T) { + const path = "/tmp/master_client_test_0" + + l, err := net.Listen("tcp", ":0") + if err != nil { + panic(err) + } + + ss := strings.Split(l.Addr().String(), ":") + p, err := strconv.Atoi(ss[len(ss)-1]) + if err != nil { + panic(err) + } + + go func(l net.Listener) { + s := NewService(chunkPerTask, time.Second, 1) + server := rpc.NewServer() + err := server.Register(s) + if err != nil { + panic(err) + } + + mux := http.NewServeMux() + mux.Handle(rpc.DefaultRPCPath, server) + err = http.Serve(l, mux) + if err != nil { + panic(err) + } + }(l) + + f, err := os.Create(path) + if err != nil { + panic(err) + } + + for i := 0; i < totalTask*chunkPerTask; i++ { + w := recordio.NewWriter(f, -1, -1) + w.Write(nil) + // call Close to force RecordIO writing a chunk. 
+ w.Close() + } + f.Close() + + // Manually intialize client to avoid calling c.getRecords() + c := &Client{} + c.conn = connection.New() + go c.monitorMaster(TestAddresser(fmt.Sprintf(":%d", p))) + c.SetDataset([]string{path}) + + checkOnePass := func(i int) { + var tasks []Task + for idx := 0; idx < totalTask; idx++ { + task, err := c.getTask() + if err != nil { + t.Fatalf("Error: %v, pass: %d\n", err, i) + } + tasks = append(tasks, task) + } + + _, err = c.getTask() + if err == nil { + t.Fatalf("Should get error, pass: %d\n", i) + } + + err = c.taskFinished(tasks[0].ID) + if err != nil { + t.Fatalf("Error: %v, pass: %d\n", err, i) + } + tasks = tasks[1:] + task, err := c.getTask() + if err != nil { + t.Fatal(err) + } + tasks = append(tasks, task) + + for _, task := range tasks { + err = c.taskFinished(task.ID) + if err != nil { + t.Fatalf("Error: %v, pass: %d\n", err, i) + } + } + } + + for i := 0; i < 10; i++ { + checkOnePass(i) + } +} diff --git a/go/master/client_test.go b/go/master/client_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2b3f873ecf3a650cd91d1d9c20b414b05bbb0cd6 --- /dev/null +++ b/go/master/client_test.go @@ -0,0 +1,79 @@ +package master_test + +import ( + "fmt" + "net" + "net/http" + "net/rpc" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/PaddlePaddle/Paddle/go/master" + "github.com/PaddlePaddle/recordio" +) + +func TestNextRecord(t *testing.T) { + const ( + path = "/tmp/master_client_TestFull" + total = 50 + ) + + l, err := net.Listen("tcp", ":0") + if err != nil { + panic(err) + } + + ss := strings.Split(l.Addr().String(), ":") + p, err := strconv.Atoi(ss[len(ss)-1]) + if err != nil { + panic(err) + } + + go func(l net.Listener) { + s := master.NewService(10, time.Second, 1) + server := rpc.NewServer() + err := server.Register(s) + if err != nil { + panic(err) + } + + mux := http.NewServeMux() + mux.Handle(rpc.DefaultRPCPath, server) + err = http.Serve(l, mux) + if err != nil { + panic(err) + } + }(l) + + f, err := os.Create(path) + if err != nil { + panic(err) + } + + w := recordio.NewWriter(f, -1, -1) + for i := 0; i < total; i++ { + w.Write([]byte{byte(i)}) + } + w.Close() + f.Close() + + c := master.NewClient(master.TestAddresser(fmt.Sprintf(":%d", p)), 10) + c.SetDataset([]string{path}) + + for pass := 0; pass < 50; pass++ { + received := make(map[byte]bool) + for i := 0; i < total; i++ { + r := c.NextRecord() + if len(r) != 1 { + t.Fatal("Length should be 1.", r) + } + if received[r[0]] { + t.Fatal("Received duplicate.", received, r) + } + received[r[0]] = true + } + } +} diff --git a/go/master/service.go b/go/master/service.go index ab17a62f3854c1e32d731037fcc9857260d03781..55e1e2d1a4a5cd6f5d5797b247e2ebe433607576 100644 --- a/go/master/service.go +++ b/go/master/service.go @@ -2,29 +2,25 @@ package master import ( "errors" - "log" + "os" + "path/filepath" "sync" "time" - "github.com/PaddlePaddle/recordio" -) + log "github.com/sirupsen/logrus" -const ( - targetTaskCount = 300 -) - -// errors -var ( - ErrNoMoreTask = errors.New("no more task for current pass") - ErrPendingTaskNotFound = errors.New("pending task not found") + "github.com/PaddlePaddle/recordio" ) // Service is the master server service. 
type Service struct { - timeoutDur time.Duration - timeoutMax int + chunksPerTask int + timeoutDur time.Duration + timeoutMax int + ready chan struct{} mu sync.Mutex + initDone bool taskQueues taskQueues } @@ -55,7 +51,6 @@ func partition(chunks []Chunk, chunksPerTask int) []taskEntry { if len(cur.Task.Chunks) > 0 { cur.Task.ID = id - id++ result = append(result, cur) } @@ -63,21 +58,21 @@ func partition(chunks []Chunk, chunksPerTask int) []taskEntry { } // NewService creates a new service. -func NewService(chunks []Chunk, chunksPerTask int, timeoutDur time.Duration, timeoutMax int) *Service { +func NewService(chunksPerTask int, timeoutDur time.Duration, timeoutMax int) *Service { s := &Service{} + s.chunksPerTask = chunksPerTask s.timeoutDur = timeoutDur s.timeoutMax = timeoutMax s.taskQueues = taskQueues{} s.taskQueues.Pending = make(map[int]taskEntry) - s.taskQueues.Todo = partition(chunks, chunksPerTask) + s.ready = make(chan struct{}) return s } // Chunk is a chunk of data consisted of several data instances. type Chunk struct { - Idx int // index of the chunk within the file Path string - Index recordio.Index // block index + Index recordio.Index // chunk index } // Task is the basic unit of data instances assigned to trainers. @@ -105,74 +100,215 @@ func (s *Service) snapshot() error { return nil } -// GetTask gets a new task from the service. -func (s *Service) GetTask(dummy int, task *Task) error { +func readChunks(globPaths []string) ([]Chunk, error) { + var chunks []Chunk + var paths []string + + for _, s := range globPaths { + match, err := filepath.Glob(s) + if err != nil { + return nil, err + } + paths = append(paths, match...) + } + + if len(paths) == 0 { + return nil, errors.New("no valid dataset specified") + } + + for _, path := range paths { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + index, err := recordio.LoadIndex(f) + if err != nil { + return nil, err + } + err = f.Close() + if err != nil { + return nil, err + } + + count := index.NumChunks() + for i := 0; i < count; i++ { + chunk := Chunk{ + Path: path, + Index: *index.ChunkIndex(i), + } + chunks = append(chunks, chunk) + } + } + + return chunks, nil +} + +// SetDataset sets dataset to dispatch for the master server. +// +// SetDataset can be call multiple times. But only the first call will +// be honored. +func (s *Service) SetDataset(globPaths []string, dummy *int) error { + if len(globPaths) == 0 { + return errors.New("no dataset specified") + } + s.mu.Lock() defer s.mu.Unlock() + if s.initDone { + // Already initialized. All trainer will call + // SetDataset, but we only handle the first one. Treat + // other calls as successful but do nothing. 
+ return nil + } - if len(s.taskQueues.Todo) == 0 { - return ErrNoMoreTask + chunks, err := readChunks(globPaths) + if err != nil { + return err } - t := s.taskQueues.Todo[0] - t.Epoch++ - s.taskQueues.Todo = s.taskQueues.Todo[1:] - s.taskQueues.Pending[t.Task.ID] = t - err := s.snapshot() + s.taskQueues.Todo = partition(chunks, s.chunksPerTask) + + err = s.snapshot() if err != nil { + log.Errorln(err) return err } - time.AfterFunc(s.timeoutDur, func(taskID int, epoch int) func() { - return func() { - s.mu.Lock() - defer s.mu.Unlock() + close(s.ready) + s.initDone = true + return nil +} - t, ok := s.taskQueues.Pending[taskID] - if !ok { - return - } +func (s *Service) checkTimeoutFunc(taskID int, epoch int) func() { + return func() { + s.mu.Lock() + defer s.mu.Unlock() + + t, ok := s.taskQueues.Pending[taskID] + if !ok { + return + } + + if t.Epoch != epoch { + // new epoch, task launched after the + // schedule of this timeout check. + return + } - if t.Epoch != epoch { - // new epoch, task launched after the - // schedule of this timeout check. - return + defer func() { + err := s.snapshot() + if err != nil { + log.Errorln(err) } + }() + + delete(s.taskQueues.Pending, t.Task.ID) + + t.NumTimeout++ + if t.NumTimeout > s.timeoutMax { + log.Warningf("Task %v timed out %d times, discard.\n", t.Task, t.NumTimeout) + s.taskQueues.Failed = append(s.taskQueues.Failed, t.Task) + return + } + + log.Warningf("Task %v timed out %d times, retry.\n", t.Task, t.NumTimeout) + s.taskQueues.Todo = append(s.taskQueues.Todo, t) + } +} + +// must be called with lock held. +func (s *Service) logFields() log.Fields { + return log.Fields{ + "todoLen": len(s.taskQueues.Todo), + "pendingLen": len(s.taskQueues.Pending), + "doneLen": len(s.taskQueues.Done), + "failedLen": len(s.taskQueues.Failed), + } +} - defer func() { - err := s.snapshot() - if err != nil { - log.Println(err) - } - }() +// GetTask gets a new task from the service. +func (s *Service) GetTask(dummy int, task *Task) error { + select { + case <-s.ready: + } - delete(s.taskQueues.Pending, t.Task.ID) + s.mu.Lock() + defer s.mu.Unlock() - t.NumTimeout++ - if t.NumTimeout > s.timeoutMax { - s.taskQueues.Failed = append(s.taskQueues.Failed, t.Task) - return + if len(s.taskQueues.Todo) == 0 { + if len(s.taskQueues.Done) == 0 { + if len(s.taskQueues.Pending) == 0 { + err := errors.New("all task failed") + log.WithFields(s.logFields()).Warningln("All tasks failed.") + return err } - s.taskQueues.Todo = append(s.taskQueues.Todo, t) + // TODO(helin): client need to retry in this + // error case. Gotcha: RPC client can't + // compare returned error with predefined + // errors like io.EOF, because the error + // instance deserialized from RPC is a + // different instance than the error defined + // in package. So we need to figure out a way + // for client to check this error correctly. + err := errors.New("no more available task") + log.WithFields(s.logFields()).Warningln("No more available task.") + return err } - }(t.Task.ID, t.Epoch)) + s.taskQueues.Todo = s.taskQueues.Done + s.taskQueues.Done = nil + log.WithFields(s.logFields()).Infoln("No more todo task, but trainer is requesting task to do. 
Move all done task to todo.") + } + + t := s.taskQueues.Todo[0] + t.Epoch++ + s.taskQueues.Todo = s.taskQueues.Todo[1:] + s.taskQueues.Pending[t.Task.ID] = t + err := s.snapshot() + if err != nil { + return err + } + + *task = t.Task + log.WithFields(s.logFields()).Infof("Task #%d dispatched.", task.ID) + + time.AfterFunc(s.timeoutDur, s.checkTimeoutFunc(t.Task.ID, t.Epoch)) return nil } // TaskFinished tell the service that a task is finished. func (s *Service) TaskFinished(taskID int, dummy *int) error { + select { + case <-s.ready: + } + s.mu.Lock() defer s.mu.Unlock() t, ok := s.taskQueues.Pending[taskID] if !ok { - return ErrPendingTaskNotFound + err := errors.New("pending task not found") + log.WithFields(s.logFields()).Warningln("Pending task #%d not found.", taskID) + return err } // task finished, reset timeout t.NumTimeout = 0 s.taskQueues.Done = append(s.taskQueues.Done, t) delete(s.taskQueues.Pending, taskID) - return s.snapshot() + + log.WithFields(s.logFields()).Infof("Task #%d finished.", taskID) + + if len(s.taskQueues.Pending) == 0 && len(s.taskQueues.Todo) == 0 { + log.WithFields(s.logFields()).Infoln("No more todo and pending task, start a new pass.") + s.taskQueues.Todo = append(s.taskQueues.Todo, s.taskQueues.Done...) + s.taskQueues.Done = nil + } + + err := s.snapshot() + if err != nil { + log.Errorln(err) + } + return err } diff --git a/go/pserver/cclient/CMakeLists.txt b/go/pserver/cclient/CMakeLists.txt index c017d7465611373309c6c60141fed864f5ccfb5d..7967af51ee9a94c9e40bf6403fe819ff462d9219 100644 --- a/go/pserver/cclient/CMakeLists.txt +++ b/go/pserver/cclient/CMakeLists.txt @@ -9,5 +9,15 @@ project(cxx_go C Go) include(golang) include(flags) -go_library(client STATIC) +go_library(paddle_pserver_cclient STATIC) + +if(PROJ_ROOT) + add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/trainer/libpaddle_pserver_cclient.a + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_pserver_cclient.h ${PROJ_ROOT}/paddle/trainer/ + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_pserver_cclient.a ${PROJ_ROOT}/paddle/trainer/ + WORKING_DIRECTORY ${PROJ_ROOT}/paddle + DEPENDS paddle_pserver_cclient) + add_custom_target(paddle_pserver_cclient_lib ALL DEPENDS ${PROJ_ROOT}/paddle/trainer/libpaddle_pserver_cclient.a) +endif(PROJ_ROOT) + add_subdirectory(test) diff --git a/go/pserver/cclient/cclient.go b/go/pserver/cclient/cclient.go index 0b4aa79806b72f4608230d2216d1741389913d95..92a41b7f5434842c6318704dd85adf9e51c19944 100644 --- a/go/pserver/cclient/cclient.go +++ b/go/pserver/cclient/cclient.go @@ -1,7 +1,6 @@ package main /* -#include #include typedef enum { PADDLE_ELEMENT_TYPE_INT32 = 0, @@ -19,39 +18,27 @@ typedef struct { int content_len; } paddle_parameter, paddle_gradient; -static inline void paddle_release_param(paddle_parameter* param) { - if (param != NULL) { - if (param->name != NULL) { - free(param->name); - } - - if (param->content != NULL) { - free(param->content); - } - - free(param); - } -} - -typedef int client; +typedef int paddle_pserver_client; +#define PSERVER_ERROR -1 +#define PSERVER_OK 0 */ import "C" import ( - "log" "strings" "sync" "unsafe" "github.com/PaddlePaddle/Paddle/go/pserver" + log "github.com/sirupsen/logrus" ) var nullPtr = unsafe.Pointer(uintptr(0)) var mu sync.Mutex -var handleMap = make(map[C.client]*pserver.Client) -var curHandle C.client +var handleMap = make(map[C.paddle_pserver_client]*pserver.Client) +var curHandle C.paddle_pserver_client -func add(c *pserver.Client) C.client { +func add(c *pserver.Client) C.paddle_pserver_client { mu.Lock() 
defer mu.Unlock() client := curHandle @@ -60,13 +47,13 @@ func add(c *pserver.Client) C.client { return client } -func get(client C.client) *pserver.Client { +func get(client C.paddle_pserver_client) *pserver.Client { mu.Lock() defer mu.Unlock() return handleMap[client] } -func remove(client C.client) *pserver.Client { +func remove(client C.paddle_pserver_client) *pserver.Client { mu.Lock() defer mu.Unlock() h := handleMap[client] @@ -100,7 +87,7 @@ func (l lister) List() []pserver.Server { } //export paddle_new_pserver_client -func paddle_new_pserver_client(addrs *C.char, selected int) C.client { +func paddle_new_pserver_client(addrs *C.char, selected int) C.paddle_pserver_client { a := C.GoString(addrs) as := strings.Split(a, ",") servers := make([]pserver.Server, len(as)) @@ -113,27 +100,27 @@ func paddle_new_pserver_client(addrs *C.char, selected int) C.client { } //export paddle_new_etcd_pserver_client -func paddle_new_etcd_pserver_client(etcd_addr *C.char) C.client { +func paddle_new_etcd_pserver_client(etcd_addr *C.char) C.paddle_pserver_client { // TODO(helin): fault tolerant pserver client using etcd. panic("not implemented.") } //export paddle_pserver_client_release -func paddle_pserver_client_release(client C.client) { +func paddle_pserver_client_release(client C.paddle_pserver_client) { remove(client) } //export paddle_begin_init_params -func paddle_begin_init_params(client C.client) C.int { +func paddle_begin_init_params(client C.paddle_pserver_client) C.int { c := get(client) if selected := c.BeginInitParams(); selected { return 1 } - return 0 + return C.PSERVER_OK } //export paddle_init_param -func paddle_init_param(client C.client, param C.paddle_parameter, param_config unsafe.Pointer, config_len C.int) C.int { +func paddle_init_param(client C.paddle_pserver_client, param C.paddle_parameter, param_config unsafe.Pointer, config_len C.int) C.int { et := pserver.ElementType(param.element_type) name := C.GoString(param.name) content := cArrayToSlice(unsafe.Pointer(param.content), int(param.content_len)) @@ -143,31 +130,41 @@ func paddle_init_param(client C.client, param C.paddle_parameter, param_config u } c := get(client) err := c.InitParam(pc) + if err != nil { - log.Println(err) - return -1 + if err.Error() == pserver.AlreadyInitialized { + log.Warningf("parameter %s already initialized, treat paddle_init_param as sucessful.\n", name) + return C.PSERVER_OK + } + log.Errorln(err) + return C.PSERVER_ERROR } - return 0 + return C.PSERVER_OK } //export paddle_finish_init_params -func paddle_finish_init_params(client C.client) C.int { +func paddle_finish_init_params(client C.paddle_pserver_client) C.int { c := get(client) err := c.FinishInitParams() if err != nil { - log.Println(err) - return -1 + if err.Error() == pserver.AlreadyInitialized { + log.Warningln("parameters already initialized, treat paddle_finish_init_params as sucessful.") + return C.PSERVER_OK + } + + log.Errorln(err) + return C.PSERVER_ERROR } - return 0 + return C.PSERVER_OK } //export paddle_send_grads -func paddle_send_grads(client C.client, grads *C.paddle_gradient, total C.int) C.int { +func paddle_send_grads(client C.paddle_pserver_client, grads **C.paddle_gradient, total C.int) C.int { var gs []pserver.Gradient for i := 0; i < int(total); i++ { - grad := (*C.paddle_gradient)(unsafe.Pointer((uintptr(unsafe.Pointer(grads)) + uintptr(i)*unsafe.Sizeof(*grads)))) + grad := *(**C.paddle_gradient)(unsafe.Pointer((uintptr(unsafe.Pointer(grads)) + uintptr(i)*unsafe.Sizeof(*grads)))) et := 
pserver.ElementType(grad.element_type) name := C.GoString(grad.name) content := cArrayToSlice(unsafe.Pointer(grad.content), int(grad.content_len)) @@ -177,84 +174,82 @@ func paddle_send_grads(client C.client, grads *C.paddle_gradient, total C.int) C c := get(client) err := c.SendGrads(gs) if err != nil { - log.Println(err) - return -1 + log.Errorln(err) + return C.PSERVER_ERROR } - return 0 + return C.PSERVER_OK } //export paddle_get_params -func paddle_get_params(client C.client, names **C.char, dst **C.paddle_parameter, total C.int) C.int { +func paddle_get_params(client C.paddle_pserver_client, dst **C.paddle_parameter, total C.int) C.int { var ns []string for i := 0; i < int(total); i++ { - name := *(**C.char)(unsafe.Pointer((uintptr(unsafe.Pointer(names)) + uintptr(i)*unsafe.Sizeof(*names)))) - ns = append(ns, C.GoString(name)) + param := *(**C.paddle_parameter)(unsafe.Pointer((uintptr(unsafe.Pointer(dst)) + uintptr(i)*unsafe.Sizeof(*dst)))) + ns = append(ns, C.GoString(param.name)) } c := get(client) ps, err := c.GetParams(ns) if err != nil { - log.Println(err) - return -1 + log.Errorln(err) + return C.PSERVER_ERROR } - for i := 0; i < int(total); i++ { - if i >= len(ps) { - break + if len(ps) != len(ns) { + pn := make([]string, len(ps)) + for i, p := range ps { + pn[i] = p.Name } + log.Errorf("pserver returned wrong number of parameters. Requested: %s, returned: %s.\n", strings.Join(pn, ", "), strings.Join(ns, ", ")) + return C.PSERVER_ERROR + } + + for i := range ps { + if ns[i] != ps[i].Name { + pn := make([]string, len(ps)) + for i, p := range ps { + pn[i] = p.Name + } + log.Errorf("pserver returned wrong parameters, or not in requested order. Requested: %s, returned: %s.\n", strings.Join(pn, ", "), strings.Join(ns, ", ")) + return C.PSERVER_ERROR + } + } + for i := 0; i < int(total); i++ { p := ps[i] param := *(**C.paddle_parameter)(unsafe.Pointer((uintptr(unsafe.Pointer(dst)) + uintptr(i)*unsafe.Sizeof(*dst)))) - nameReady := false - contentAllocated := false if unsafe.Pointer(param) == nullPtr { - param = (*C.paddle_parameter)(C.calloc(1, C.size_t(unsafe.Sizeof(*param)))) - } else { - if unsafe.Pointer(param.name) != nullPtr { - if n := C.GoString(param.name); n != p.Name { - log.Println("Warning: the pre-allocated parameter name does not match the parameter name, it will be freed.", n, p.Name) - C.free(unsafe.Pointer(param.name)) - } else { - nameReady = true - } - } + log.Errorln("must pre-allocate parameter.") + return C.PSERVER_ERROR + } - if unsafe.Pointer(param.content) != nullPtr { - if int(param.content_len) == len(p.Content) { - contentAllocated = true - } else { - log.Println("Warning: the pre-allocated content len does not match parameter content len, the pre-allocated content will be freed.", param.content_len, len(p.Content)) - C.free(unsafe.Pointer(param.content)) - } + if unsafe.Pointer(param.content) != nullPtr { + if int(param.content_len) != len(p.Content) { + log.Errorf("the pre-allocated content len does not match parameter content len. 
Pre-allocated len: %d, returned len: %d", param.content_len, len(p.Content)) + return C.PSERVER_ERROR } } - if !nameReady { - param.name = C.CString(p.Name) - } - if !contentAllocated { - param.content = (*C.uchar)(C.malloc(C.size_t(len(p.Content)))) - } C.memcpy(unsafe.Pointer(param.content), unsafe.Pointer(&p.Content[0]), C.size_t(len(p.Content))) param.content_len = C.int(len(p.Content)) param.element_type = C.paddle_element_type(p.ElementType) } - return 0 + return C.PSERVER_OK } //export paddle_save_model -func paddle_save_model(client C.client, path *C.char) C.int { +func paddle_save_model(client C.paddle_pserver_client, path *C.char) C.int { p := C.GoString(path) c := get(client) err := c.Save(p) if err != nil { - log.Println(err) - return -1 + log.Errorln(err) + return C.PSERVER_ERROR } - return 0 + return C.PSERVER_OK } func main() {} // Required but ignored diff --git a/go/pserver/cclient/test/CMakeLists.txt b/go/pserver/cclient/test/CMakeLists.txt index 16f84648c1de3a8fdb4595c00bdb7608a152ded2..1a3dd7e5e9e0ff3273fc2be67c48461797b4a6b3 100644 --- a/go/pserver/cclient/test/CMakeLists.txt +++ b/go/pserver/cclient/test/CMakeLists.txt @@ -1,11 +1,22 @@ cmake_minimum_required(VERSION 3.0) -include_directories(${CMAKE_BINARY_DIR}) - add_executable(main main.c) -add_dependencies(main client) +add_dependencies(main paddle_pserver_cclient) +add_executable(test_cclient test_cclient.c) +add_dependencies(test_cclient paddle_pserver_cclient) if(APPLE) set(CMAKE_EXE_LINKER_FLAGS "-framework CoreFoundation -framework Security") +else() + set(CMAKE_EXE_LINKER_FLAGS "-pthread") endif() -target_link_libraries(main ${CMAKE_BINARY_DIR}/libclient.a) + +if(PROJ_ROOT) + include_directories(${CMAKE_CURRENT_BINARY_DIR}/..) + target_link_libraries(main ${CMAKE_CURRENT_BINARY_DIR}/../libpaddle_pserver_cclient.a pthread) + target_link_libraries(test_cclient ${CMAKE_CURRENT_BINARY_DIR}/../libpaddle_pserver_cclient.a pthread) +else(PROJ_ROOT) + include_directories(${CMAKE_BINARY_DIR}) + target_link_libraries(main ${CMAKE_BINARY_DIR}/libpaddle_pserver_cclient.a pthread) + target_link_libraries(test_cclient ${CMAKE_BINARY_DIR}/libpaddle_pserver_cclient.a pthread) +endif(PROJ_ROOT) diff --git a/go/pserver/cclient/test/main.c b/go/pserver/cclient/test/main.c index f75a2110b947520dfec1265e56eaf2ba7ac3b51b..03f749d4e46c4890c6dcfa25af572dab4a053c86 100644 --- a/go/pserver/cclient/test/main.c +++ b/go/pserver/cclient/test/main.c @@ -1,68 +1,91 @@ #include +#include -#include "libclient.h" +#include "libpaddle_pserver_cclient.h" -void fail() { - // TODO(helin): fix: gtest using cmake is not working, using this - // hacky way for now. - printf("test failed.\n"); +// TODO(helin): Fix: gtest using cmake is not working, using this +// hacky way for now. +#define fail() \ + fprintf(stderr, "info: %s:%d: ", __FILE__, __LINE__); \ exit(-1); + +void sendGrads(paddle_pserver_client c) { + unsigned char grad_a[2000] = {2}; + unsigned char grad_b[3000] = {3}; + paddle_gradient grad1 = { + "param_a", PADDLE_ELEMENT_TYPE_FLOAT32, grad_a, 2000}; + paddle_gradient grad2 = { + "param_b", PADDLE_ELEMENT_TYPE_FLOAT32, grad_b, 3000}; + paddle_gradient* grads[2] = {&grad1, &grad2}; + if (paddle_send_grads(c, grads, 2)) { + fail(); + } +} + +void getParams(paddle_pserver_client c) { + paddle_parameter param_a; + paddle_parameter param_b; + char name_a[] = "param_a"; + char name_b[] = "param_b"; + // Must pre-allocate the prameter content before calling paddle_get_params. 
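+  // Note: paddle_get_params only copies into these caller-owned buffers, it
+  // never allocates. Every paddle_parameter passed in must already have its
+  // name and element_type filled in and content pointing to a buffer whose
+  // content_len matches the size of that parameter on the pserver;
+  // otherwise the call returns PSERVER_ERROR.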
+ unsigned char content_a[2000] = {}; + unsigned char content_b[3000] = {}; + param_a.element_type = PADDLE_ELEMENT_TYPE_FLOAT32; + param_a.name = name_a; + param_a.content = content_a; + param_a.content_len = 2000; + param_b.element_type = PADDLE_ELEMENT_TYPE_FLOAT32; + param_b.name = name_b; + param_b.content = content_b; + param_b.content_len = 3000; + + paddle_parameter* params[2] = {¶m_a, ¶m_b}; + if (paddle_get_params(c, params, 2)) { + fail(); + } } int main() { char addr[] = "localhost:3000"; - client c = paddle_new_pserver_client(addr, 1); + paddle_pserver_client c = paddle_new_pserver_client(addr, 1); retry: if (paddle_begin_init_params(c)) { paddle_parameter param; char name_a[] = "param_a"; char name_b[] = "param_b"; - unsigned char content[] = {0x00, 0x11, 0x22}; + unsigned char content_a[2000] = {1}; + unsigned char content_b[3000] = {0}; param.element_type = PADDLE_ELEMENT_TYPE_FLOAT32; param.name = name_a; - param.content = content; - param.content_len = 3; - if (paddle_init_param(c, param, NULL, 0) != 0) { + param.content = content_a; + param.content_len = 2000; + int error = paddle_init_param(c, param, NULL, 0); + if (error != 0) { goto retry; } - param.element_type = PADDLE_ELEMENT_TYPE_INT32; + + param.element_type = PADDLE_ELEMENT_TYPE_FLOAT32; param.name = name_b; - param.content = content; - param.content_len = 3; - if (paddle_init_param(c, param, NULL, 0) != 0) { + param.content = content_b; + param.content_len = 3000; + error = paddle_init_param(c, param, NULL, 0); + if (error != 0) { goto retry; } - if (paddle_finish_init_params(c) != 0) { + + error = paddle_finish_init_params(c); + if (error != 0) { goto retry; } - } else { - fail(); - } - - unsigned char content[] = {0x00, 0x11, 0x22}; - paddle_gradient grads[2] = { - {"param_a", PADDLE_ELEMENT_TYPE_INT32, content, 3}, - {"param_b", PADDLE_ELEMENT_TYPE_FLOAT32, content, 3}}; - - if (!paddle_send_grads(c, grads, 2)) { - fail(); } - paddle_parameter* params[2] = {NULL, NULL}; - char* names[] = {"param_a", "param_b"}; - if (!paddle_get_params(c, names, params, 2)) { - fail(); + int i; + for (i = 0; i < 100; i++) { + sendGrads(c); + getParams(c); } - // get parameters again by reusing the allocated parameter buffers. - if (!paddle_get_params(c, names, params, 2)) { - fail(); - } - - paddle_release_param(params[0]); - paddle_release_param(params[1]); - - if (!paddle_save_model(c, "/tmp/")) { + if (paddle_save_model(c, "/tmp/")) { fail(); } diff --git a/go/pserver/cclient/test/test_cclient.c b/go/pserver/cclient/test/test_cclient.c new file mode 100644 index 0000000000000000000000000000000000000000..0f9c2ef80114d4c5cd887117952f5b7b5d9355f6 --- /dev/null +++ b/go/pserver/cclient/test/test_cclient.c @@ -0,0 +1,117 @@ +#include +#include + +#include "libpaddle_pserver_cclient.h" + +typedef float real; + +void fail() { + // TODO(helin): fix: gtest using cmake is not working, using this + // hacky way for now. 
+ printf("test failed.\n"); + exit(-1); +} + +void print_parameter(paddle_gradient* param) { + if (param == NULL) { + printf("param is NULL!!\n"); + } else { + printf("==== parameter ====\n"); + printf("name: %s\n", param->name); + printf("content_len: %d\n", param->content_len); + printf("content_type: %d\n", param->element_type); + int i; + for (i = 0; i < param->content_len / (int)sizeof(real); ++i) { + printf("%f ", ((float*)param->content)[i]); + } + printf("\n\n"); + } +} + +int main() { + char addr[] = "localhost:3000"; + paddle_pserver_client c = paddle_new_pserver_client(addr, 1); + + char* names[] = {"param_a", "param_b"}; + +retry: + printf("init parameter to pserver:\n"); + + real param_content1[] = {0.1, 0.2, 0.3}; + real param_content2[] = {0.4, 0.5, 0.6}; + paddle_parameter** params = + (paddle_parameter**)malloc(sizeof(paddle_parameter*) * 2); + params[0] = (paddle_parameter*)malloc(sizeof(paddle_parameter)); + params[0]->name = names[0]; + params[0]->content = (unsigned char*)param_content1; + params[0]->content_len = 3 * sizeof(real); + params[0]->element_type = PADDLE_ELEMENT_TYPE_FLOAT32; + + params[1] = (paddle_parameter*)malloc(sizeof(paddle_parameter)); + params[1]->name = names[1]; + params[1]->content = (unsigned char*)param_content2; + params[1]->content_len = 3 * sizeof(real); + params[1]->element_type = PADDLE_ELEMENT_TYPE_INT32; + + if (paddle_begin_init_params(c)) { + if (paddle_init_param(c, *params[0], NULL, 0) != 0) { + goto retry; + } + if (paddle_init_param(c, *params[1], NULL, 0) != 0) { + goto retry; + } + if (paddle_finish_init_params(c) != 0) { + goto retry; + } + } else { + fail(); + } + + printf("get inited parameters from pserver:\n"); + // get parameters again by reusing the allocated parameter buffers. + if (paddle_get_params(c, params, 2) != 0) { + fail(); + } + print_parameter(params[0]); + print_parameter(params[1]); + + printf("send gradient to pserver:\n"); + real gradient_content1[] = {0.01, 0.02, 0.03}; + real gradinet_content2[] = {0.04, 0.05, 0.06}; + + paddle_gradient** grads = + (paddle_gradient**)malloc(sizeof(paddle_gradient*) * 2); + grads[0] = (paddle_gradient*)malloc(sizeof(paddle_gradient)); + grads[0]->name = names[0]; + grads[0]->content = (unsigned char*)gradient_content1; + grads[0]->content_len = 3 * sizeof(real); + grads[0]->element_type = PADDLE_ELEMENT_TYPE_FLOAT32; + + grads[1] = (paddle_gradient*)malloc(sizeof(paddle_gradient)); + grads[1]->name = names[1]; + grads[1]->content = (unsigned char*)gradinet_content2; + grads[1]->content_len = 3 * sizeof(real); + grads[1]->element_type = PADDLE_ELEMENT_TYPE_INT32; + + printf("print gradient sent to pserver:\n"); + print_parameter(grads[0]); + print_parameter(grads[1]); + + if (paddle_send_grads(c, grads, 2) != 0) { + fail(); + } + + printf("get updated parameters from pserver:\n"); + // get parameters again by reusing the allocated parameter buffers. 
+ if (paddle_get_params(c, params, 2) != 0) { + fail(); + } + print_parameter(params[0]); + print_parameter(params[1]); + + if (paddle_save_model(c, "/tmp/") != 0) { + fail(); + } + + return 0; +} diff --git a/go/pserver/cclient/test/test_mnist.py b/go/pserver/cclient/test/test_mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a3af55e2812fa0c965d22ddaba198f43f3c4ad --- /dev/null +++ b/go/pserver/cclient/test/test_mnist.py @@ -0,0 +1,131 @@ +import paddle.v2 as paddle +import gzip + + +def softmax_regression(img): + predict = paddle.layer.fc(input=img, + size=10, + act=paddle.activation.Softmax()) + return predict + + +def multilayer_perceptron(img): + # The first fully-connected layer + hidden1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu()) + # The second fully-connected layer and the according activation function + hidden2 = paddle.layer.fc(input=hidden1, + size=64, + act=paddle.activation.Relu()) + # The thrid fully-connected layer, note that the hidden size should be 10, + # which is the number of unique digits + predict = paddle.layer.fc(input=hidden2, + size=10, + act=paddle.activation.Softmax()) + return predict + + +def convolutional_neural_network(img): + # first conv layer + conv_pool_1 = paddle.networks.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + num_channel=1, + pool_size=2, + pool_stride=2, + act=paddle.activation.Tanh()) + # second conv layer + conv_pool_2 = paddle.networks.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + num_channel=20, + pool_size=2, + pool_stride=2, + act=paddle.activation.Tanh()) + # The first fully-connected layer + fc1 = paddle.layer.fc(input=conv_pool_2, + size=128, + act=paddle.activation.Tanh()) + # The softmax layer, note that the hidden size should be 10, + # which is the number of unique digits + predict = paddle.layer.fc(input=fc1, + size=10, + act=paddle.activation.Softmax()) + return predict + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + + # define network topology + images = paddle.layer.data( + name='pixel', type=paddle.data_type.dense_vector(784)) + label = paddle.layer.data( + name='label', type=paddle.data_type.integer_value(10)) + + # Here we can build the prediction network in different ways. Please + # choose one by uncomment corresponding line. 
+ predict = softmax_regression(images) + #predict = multilayer_perceptron(images) + #predict = convolutional_neural_network(images) + + cost = paddle.layer.classification_cost(input=predict, label=label) + parameters = paddle.parameters.create(cost) + + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1 / 128.0, + momentum=0.9, + regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128)) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer, + is_local=False, + pserver_spec="localhost:3000") + + lists = [] + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 1000 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + + elif isinstance(event, paddle.event.EndPass): + result = trainer.test(reader=paddle.batch( + paddle.dataset.mnist.test(), batch_size=128)) + print "Test with Pass %d, Cost %f, %s\n" % ( + event.pass_id, result.cost, result.metrics) + lists.append((event.pass_id, result.cost, + result.metrics['classification_error_evaluator'])) + + trainer.train( + reader=paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=8192), + batch_size=128), + event_handler=event_handler, + num_passes=100) + + # find the best pass + best = sorted(lists, key=lambda list: float(list[1]))[0] + print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1]) + print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100) + + test_creator = paddle.dataset.mnist.test() + test_data = [] + for item in test_creator(): + test_data.append((item[0], )) + if len(test_data) == 100: + break + + # output is a softmax layer. It returns probabilities. + # Shape should be (100, 10) + probs = paddle.infer( + output_layer=predict, parameters=parameters, input=test_data) + print probs.shape + + +if __name__ == '__main__': + main() diff --git a/go/pserver/cclient/test/test_train.py b/go/pserver/cclient/test/test_train.py new file mode 100644 index 0000000000000000000000000000000000000000..3f8d5d793bdeb687c9d234005d9e2eae760cc3a7 --- /dev/null +++ b/go/pserver/cclient/test/test_train.py @@ -0,0 +1,60 @@ +import paddle.v2 as paddle +import paddle.v2.dataset.uci_housing as uci_housing + + +def main(): + # init + paddle.init(use_gpu=False, trainer_count=1) + + # network config + x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) + y_predict = paddle.layer.fc(input=x, + param_attr=paddle.attr.Param(name='w'), + size=1, + act=paddle.activation.Linear(), + bias_attr=paddle.attr.Param(name='b')) + y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1)) + cost = paddle.layer.mse_cost(input=y_predict, label=y) + + # create parameters + parameters = paddle.parameters.create(cost) + + # create optimizer + optimizer = paddle.optimizer.Momentum(momentum=0) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer, + is_local=False, + pserver_spec="localhost:3000") + + # event_handler to print training and testing info + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f" % ( + event.pass_id, event.batch_id, event.cost) + + if isinstance(event, paddle.event.EndPass): + if (event.pass_id + 1) % 10 == 0: + result = trainer.test( + reader=paddle.batch( + uci_housing.test(), batch_size=2), + feeding={'x': 0, + 'y': 1}) + print "Test %d, %.2f" % (event.pass_id, 
result.cost) + + # training + trainer.train( + reader=paddle.batch( + paddle.reader.shuffle( + uci_housing.train(), buf_size=500), + batch_size=2), + feeding={'x': 0, + 'y': 1}, + event_handler=event_handler, + num_passes=30) + + +if __name__ == '__main__': + main() diff --git a/go/pserver/client.go b/go/pserver/client.go index f8bd0aa59f30ec7e2b2d318929af96135d3128ed..dda915977282d4880ddcc8c18ef6fd80ede9e01b 100644 --- a/go/pserver/client.go +++ b/go/pserver/client.go @@ -2,11 +2,11 @@ package pserver import ( "hash/fnv" - "log" "sort" "time" - "github.com/PaddlePaddle/Paddle/go/pserver/internal/connection" + "github.com/PaddlePaddle/Paddle/go/connection" + log "github.com/sirupsen/logrus" ) // TODO(helin): add RPC call retry logic @@ -47,7 +47,7 @@ func NewClient(l Lister, pserverNum int, sel Selector) *Client { // monitorPservers monitors pserver addresses, and updates connection // when the address changes. func (c *Client) monitorPservers(l Lister, pserverNum int) { - knownServers := make([]Server, pserverNum) + lastServers := make([]Server, pserverNum) ticker := time.NewTicker(10 * time.Second) monitor := func() { curServers := make([]Server, pserverNum) @@ -56,25 +56,37 @@ func (c *Client) monitorPservers(l Lister, pserverNum int) { curServers[l.Index] = l } - for i := range knownServers { - if knownServers[i].Addr != curServers[i].Addr { - err := c.pservers[i].Connect(curServers[i].Addr) - if err != nil { - log.Println(err) + for i := range lastServers { + if lastServers[i].Addr == curServers[i].Addr { + continue + } - // connect to addr failed, set - // to last known addr in order - // to retry next time. - curServers[i].Addr = knownServers[i].Addr + if curServers[i].Addr == "" { + err := c.pservers[i].Close() + if err != nil { + log.Errorln(err) } + + continue } + + err := c.pservers[i].Connect(curServers[i].Addr) + if err != nil { + log.Errorln(err) + + // connect to addr failed, set + // to last known addr in order + // to retry next time. + curServers[i].Addr = lastServers[i].Addr + } + } - knownServers = curServers + lastServers = curServers } monitor() - for _ = range ticker.C { + for range ticker.C { monitor() } } @@ -93,16 +105,14 @@ func (c *Client) BeginInitParams() bool { // InitParam initializes the parameter on parameter servers. func (c *Client) InitParam(paramWithConfigs ParameterWithConfig) error { - var dummy int - return c.pservers[c.partition(paramWithConfigs.Param.Name)].Call("Service.InitParam", paramWithConfigs, &dummy) + return c.pservers[c.partition(paramWithConfigs.Param.Name)].Call("Service.InitParam", paramWithConfigs, nil) } // FinishInitParams tells parameter servers client has sent all // parameters to parameter servers as initialization. 
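+// Only the trainer selected by BeginInitParams is expected to call InitParam
+// and FinishInitParams; the other trainers skip initialization and their
+// first GetParams call blocks until the selected trainer has finished. A
+// repeated FinishInitParams is answered with the "pserver already
+// initialized" error, which the C client deliberately treats as success.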
func (c *Client) FinishInitParams() error { for _, p := range c.pservers { - var dummy int - err := p.Call("Service.FinishInitParams", dummy, &dummy) + err := p.Call("Service.FinishInitParams", 0, nil) if err != nil { return err } @@ -116,8 +126,7 @@ func (c *Client) SendGrads(grads []Gradient) error { errCh := make(chan error, len(grads)) for _, g := range grads { go func(g Gradient) { - var dummy int - err := c.pservers[c.partition(g.Name)].Call("Service.SendGrad", g, &dummy) + err := c.pservers[c.partition(g.Name)].Call("Service.SendGrad", g, nil) errCh <- err }(g) } @@ -196,8 +205,7 @@ func (c *Client) Save(path string) error { errCh := make(chan error, len(c.pservers)) for _, p := range c.pservers { - var dummy int - err := p.Call("Service.Save", path, &dummy) + err := p.Call("Service.Save", path, nil) errCh <- err } diff --git a/go/pserver/client_test.go b/go/pserver/client_test.go index a9a0948a51a31a1c7393f716e3dfc436dbf919af..d0371a26a13fac9daecacd0b6a271caa6d830651 100644 --- a/go/pserver/client_test.go +++ b/go/pserver/client_test.go @@ -117,7 +117,7 @@ func TestClientFull(t *testing.T) { for i := range params { if names[i] != params[i].Name { - t.Fatalf("order of returned parameter does not required: parameter name: %s, required name: %s", names[i], params[i]) + t.Fatalf("order of returned parameter does not required: parameter name: %s, required name: %s", names[i], params[i].Name) } } } diff --git a/go/pserver/optimizer.c b/go/pserver/optimizer.c index b8da3ec9592053e3efe00e69d73a8ae259a30a2f..f16ba2cbf8e168a434fdcdb4f1e0ba1e98d91c6b 100644 --- a/go/pserver/optimizer.c +++ b/go/pserver/optimizer.c @@ -32,7 +32,13 @@ int update_SGD(void* optimizer, const void* gradient, int num_bytes) { SGD_optimizer* o = (SGD_optimizer*)optimizer; - // TODO + float* parameter = (float*)buffer; + float* grad = (float*)gradient; + + int i; + for (i = 0; i < num_bytes / sizeof(float); ++i) { + parameter[i] -= o->learning_rate * grad[i]; + } return 0; } diff --git a/go/pserver/service.go b/go/pserver/service.go index d5787b9708bb15629a6e6290ffc97ee9885bc8b8..78a2bfaf6347019333bf1c7ee6cdc04d93ab1370 100644 --- a/go/pserver/service.go +++ b/go/pserver/service.go @@ -9,8 +9,10 @@ import ( // ElementType is the type of elements of a Parameter. type ElementType int -var ErrAlreadyInitialized = errors.New("pserver already initialized") -var ErrUninitialized = errors.New("pserver not fully initialized") +const ( + AlreadyInitialized = "pserver already initialized" + Uninitialized = "pserver not fully initialized" +) // Supported element types const ( @@ -49,7 +51,7 @@ type Service struct { // NewService creates a new service. 
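+// The service gates everything on s.initialized, a channel that is closed
+// once FinishInitParams succeeds: InitParam and FinishInitParams report
+// AlreadyInitialized after that point, SendGrad reports Uninitialized before
+// it, and GetParam/Save simply block until it is closed. The sentinel error
+// values were replaced by message strings because an error deserialized from
+// an RPC response is a different instance and can only be matched by text.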
func NewService() *Service { - s := &Service{opt: newOptimizer(sgd, 0.01)} + s := &Service{opt: newOptimizer(sgd, 0.005)} s.paramMap = make(map[string]Parameter) s.initialized = make(chan struct{}) return s @@ -59,7 +61,7 @@ func NewService() *Service { func (s *Service) InitParam(paramWithConfigs ParameterWithConfig, dummy *int) error { select { case <-s.initialized: - return ErrAlreadyInitialized + return errors.New(AlreadyInitialized) default: } @@ -80,7 +82,7 @@ func (s *Service) InitParam(paramWithConfigs ParameterWithConfig, dummy *int) er func (s *Service) FinishInitParams(dummy0 int, dummy1 *int) error { select { case <-s.initialized: - return ErrAlreadyInitialized + return errors.New(AlreadyInitialized) default: } @@ -94,7 +96,7 @@ func (s *Service) SendGrad(g Gradient, dummy *int) error { select { case <-s.initialized: default: - return ErrUninitialized + return errors.New(Uninitialized) } s.mu.Lock() diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index 4c9fac4536e09013916aadb26af3a86a5a775b4f..b746d13e1ca71e697c464f84d915af029d37120c 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -15,8 +15,7 @@ func TestFull(t *testing.T) { p.Name = "param_a" p.Content = []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0} p.ElementType = pserver.Int32 - var dummy int - err := s.InitParam(pserver.ParameterWithConfig{p, nil}, &dummy) + err := s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil) if err != nil { t.FailNow() } @@ -25,12 +24,12 @@ func TestFull(t *testing.T) { p1.Name = "param_b" p1.Content = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} p1.ElementType = pserver.Float32 - err = s.InitParam(pserver.ParameterWithConfig{p1, nil}, &dummy) + err = s.InitParam(pserver.ParameterWithConfig{Param: p1, Config: nil}, nil) if err != nil { t.FailNow() } - err = s.FinishInitParams(0, &dummy) + err = s.FinishInitParams(0, nil) if err != nil { t.FailNow() } @@ -46,11 +45,11 @@ func TestFull(t *testing.T) { } g1, g2 := pserver.Gradient(p1), pserver.Gradient(p) - err = s.SendGrad(g1, &dummy) + err = s.SendGrad(g1, nil) if err != nil { t.FailNow() } - err = s.SendGrad(g2, &dummy) + err = s.SendGrad(g2, nil) if err != nil { t.FailNow() @@ -74,23 +73,21 @@ func TestFull(t *testing.T) { func TestMultipleInit(t *testing.T) { s := pserver.NewService() - var dummy int - err := s.FinishInitParams(0, &dummy) + err := s.FinishInitParams(0, nil) if err != nil { t.FailNow() } - err = s.FinishInitParams(0, &dummy) - if err != pserver.ErrAlreadyInitialized { + err = s.FinishInitParams(0, nil) + if err.Error() != pserver.AlreadyInitialized { t.FailNow() } } func TestUninitialized(t *testing.T) { s := pserver.NewService() - var dummy int - err := s.SendGrad(pserver.Gradient{}, &dummy) - if err != pserver.ErrUninitialized { + err := s.SendGrad(pserver.Gradient{}, nil) + if err.Error() != pserver.Uninitialized { t.FailNow() } } @@ -98,13 +95,14 @@ func TestUninitialized(t *testing.T) { func TestBlockUntilInitialized(t *testing.T) { s := pserver.NewService() ch := make(chan struct{}, 2) + errCh := make(chan error, 2) var wg sync.WaitGroup wg.Add(1) go func() { var param pserver.Parameter err := s.GetParam("param_a", ¶m) if err != nil { - t.FailNow() + errCh <- err } wg.Done() ch <- struct{}{} @@ -112,10 +110,9 @@ func TestBlockUntilInitialized(t *testing.T) { wg.Add(1) go func() { - var dummy int - err := s.Save("", &dummy) + err := s.Save("", nil) if err != nil { - t.FailNow() + errCh <- err } wg.Done() ch <- struct{}{} @@ -127,6 +124,8 @@ func 
TestBlockUntilInitialized(t *testing.T) { case <-ch: // some function returned before initialization is completed. t.FailNow() + case <-errCh: + t.FailNow() default: } @@ -134,13 +133,12 @@ func TestBlockUntilInitialized(t *testing.T) { p.Name = "param_a" p.Content = []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0} p.ElementType = pserver.Int32 - var dummy int - err := s.InitParam(pserver.ParameterWithConfig{p, nil}, &dummy) + err := s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil) if err != nil { t.FailNow() } - err = s.FinishInitParams(0, &dummy) + err = s.FinishInitParams(0, nil) if err != nil { t.FailNow() } diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 071bc36c2ded51ba977350aeae15f6d244cea5be..c9433a38de4d005ebe229c55916401a5f82e9ef3 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -16,7 +16,7 @@ set(API_HEADER Internal.h) add_library(paddle_api STATIC ${API_SOURCES}) -add_dependencies(paddle_api gen_proto_cpp) +add_dependencies(paddle_api gen_proto_cpp paddle_pserver_cclient_lib) INCLUDE(${SWIG_USE_FILE}) INCLUDE_DIRECTORIES(${PROJ_ROOT}/paddle) @@ -45,7 +45,7 @@ SET(SWIG_MODULE_swig_paddle_EXTRA_DEPS ) IF(APPLE) - SET(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load") + SET(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load -framework CoreFoundation -framework Security") ELSE(APPLE) SET(START_GROUP "-Xlinker -start-group") SET(END_GROUP "-Xlinker -end-group") diff --git a/paddle/api/Paddle.i b/paddle/api/Paddle.i index 068ba286c07d8854a1a7c7042224a679b50b4957..3237e73745dca58bed923b20851f0f0039a3487c 100644 --- a/paddle/api/Paddle.i +++ b/paddle/api/Paddle.i @@ -179,6 +179,7 @@ namespace std { %newobject ParameterOptimizer::needSpecialTraversal; %newobject ParameterUpdater::createLocalUpdater; %newobject ParameterUpdater::createRemoteUpdater; +%newobject ParameterUpdater::createNewRemoteUpdater; %feature("director") UpdateCallback; %feature("autodoc", 1); // To generate method stub, for code hint in ide diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index da0f157abd68c73c45f498cf9ef2726aac67c95b..7565ea51fe3e71bf81a28e6e4b5a2bbdd085798c 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -841,6 +841,8 @@ public: static ParameterUpdater* createRemoteUpdater(OptimizationConfig* config, int passCount, bool useSparseUpdater); + static ParameterUpdater* createNewRemoteUpdater( + OptimizationConfig* config, const std::string pserverSpec); ~ParameterUpdater(); /** diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 79921ea6e787f3c0ebecaad6a9a54bac92211320..eaf8518ae2beaa93bc40ee944c984d142d2bb951 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" +#include "paddle/trainer/NewRemoteParameterUpdater.h" #include "paddle/trainer/RemoteParameterUpdater.h" #include "paddle/trainer/ThreadParameterUpdater.h" @@ -28,6 +29,14 @@ ParameterUpdater *ParameterUpdater::createLocalUpdater( return updater; } +ParameterUpdater *ParameterUpdater::createNewRemoteUpdater( + OptimizationConfig *config, const std::string pserverSpec) { + auto updater = new ParameterUpdater(); + updater->m->updater.reset(new paddle::NewRemoteParameterUpdater( + config->m->getConfig(), pserverSpec)); + return updater; +} + ParameterUpdater *ParameterUpdater::createRemoteUpdater( OptimizationConfig *config, int passCount, bool useSparseUpdater) { auto updater = new ParameterUpdater(); diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index 1f54ac1231c6ac2e19b25bb336292194c63c11e9..5e170714cf5b183fcf6e76d34746333397e6b060 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -14,8 +14,8 @@ add_library(paddle_function STATIC ${cpp_files} ${cu_objs}) add_dependencies(paddle_function ${external_project_dependencies}) add_dependencies(paddle_function gen_proto_cpp) -if(WITH_GPU) if(WITH_TESTING) +if(WITH_GPU) # TODO: # file(GLOB test_files . *OpTest.cpp) # add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files}) @@ -30,6 +30,8 @@ if(WITH_TESTING) add_simple_unittest(CosSimOpTest) add_simple_unittest(RowConvOpTest) endif() + +add_simple_unittest(ConvOpTest) endif() add_style_check_target(paddle_function ${h_files}) diff --git a/paddle/function/ContextProjectionOpTest.cpp b/paddle/function/ContextProjectionOpTest.cpp index 1b25172ca5c0c4e64db01806fb8239af7e06d90d..9e9dd20e6f3abe3bd087e434d7b64eec5bfadcfb 100644 --- a/paddle/function/ContextProjectionOpTest.cpp +++ b/paddle/function/ContextProjectionOpTest.cpp @@ -28,7 +28,7 @@ void testMatrixProjectionForward(int context_start, std::max(0, (int)(context_start + context_length - 1)); if (pad == 0) is_padding = false; - FunctionCompare test( + CpuGpuFuncCompare test( "ContextProjectionForward", FuncConfig() .set("context_length", context_length) @@ -60,7 +60,7 @@ void testMatrixProjectionBackward(int context_start, std::max(0, (int)(context_start + context_length - 1)); if (pad == 0) is_padding = false; - FunctionCompare test( + CpuGpuFuncCompare test( "ContextProjectionBackward", FuncConfig() .set("context_length", context_length) diff --git a/paddle/function/ConvOp.h b/paddle/function/ConvOp.h new file mode 100644 index 0000000000000000000000000000000000000000..65b9d1d53f9210b08cdc8bbd9d93b03305e582e4 --- /dev/null +++ b/paddle/function/ConvOp.h @@ -0,0 +1,146 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Function.h" + +namespace paddle { + +/* + * \brief Based on the ConvFunctionBase class, the forward calculation, + * backward input calculation and backward filter calculation + * of convolution operations can be implemented. 
+ * + * Arguments of forward and backward calculation: + * 1. Forward calculation of convolution. + * inputs = {INPUT, FILTER}, outputs = {OUTPUT} + * The first and second input arguments are input image and filter data. + * The output argument is output image. + * + * 2. Backward input calculation of convolution. + * inputs = {OUTPUT_GRAD, FILTER}, outputs = {INPUT_GRAD} + * The first and second input arguments are output grad image + * and filter data. + * The output argument is input grad image. + * + * 3. Backward filter calculation of convolution. + * inputs = {OUTPUT_GRAD, INPUT}, outputs = {FILTER_GRAD} + * The first and second input arguments are output grad image + * and input image. + * The output argument is filter grad. + * + * Arguments format of input, filter and output: + * 1. Input image, output image, input image gradient, output image gradient + * are all NCHW format. Where N is batch size, C is the number of channels, + * H and W is the height and width of image or image gradient. + * + * 2. The format of the filter data is MCHW, where M is the number of output + * image channels, C is the number of input image channels, + * H and W is height and width of filter. + * + * If `groups` is greater than 1, the filter's data format should be GMCHW, + * where G is the `groups`, and G * M is the number of output image + * channels, G * C is the number of input image channels, + * H and W is height and width of filter. + */ +class ConvFunctionBase : public FunctionBase { +public: + void init(const FuncConfig& config) override { + // function arguments + strides_ = config.get>("strides"); + paddings_ = config.get>("paddings"); + groups_ = config.get("groups"); + + // number of inputs and outputs + numInputs_ = 2; + numOutputs_ = 1; + } + + virtual void calc(const BufferArgs& inputs, const BufferArgs& outputs) {} + + // input can be INPUT and INPUT_GRAD + // filter can be FILTER and FILTER_GRAD + // output can be OUTPUT and OUTPUT_GRAD + void check(const TensorShape& input, + const TensorShape& filter, + const TensorShape& output) { + // inputs and outputs arguments should be 4-dimensional. + CHECK_EQ(input.ndims(), (size_t)4); + CHECK_EQ(output.ndims(), (size_t)4); + // The batchSize of the input needs to be equal to + // the batchSize of the output. + CHECK_EQ(input[0], output[0]); + + if (filter.ndims() == (size_t)4) { + // If the filter's dimension is 4, groups convolution is not supported. + CHECK_EQ(groups_, (size_t)1); + // The input and output channel dimensions are the second and first + // dimensions of the filter shape. + CHECK_EQ(input[1], filter[1]); + CHECK_EQ(output[1], filter[0]); + } else { + // filter argument should be 5-dimensional. + CHECK_EQ(filter.ndims(), (size_t)5); + // The first dimension of the filter is the size of the group + CHECK_EQ(filter[0], groups_); + // The input and output channel dimensions are the third and second + // dimensions of the filter shape. 
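+      // e.g. with groups_ = 2, a GMCHW filter of shape {2, 8, 4, 3, 3}
+      // pairs with 2 * 4 = 8 input channels and yields 2 * 8 = 16 output
+      // channels, which is what the two checks below verify.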
+ CHECK_EQ(input[1], filter[2] * groups_); + CHECK_EQ(output[1], filter[1] * groups_); + } + } + +protected: + size_t getFilterHeight(const TensorShape& filter) const { + return filter[filter.ndims() - 2]; + } + + size_t getFilterWidth(const TensorShape& filter) const { + return filter[filter.ndims() - 1]; + } + + std::vector strides_; + std::vector paddings_; + + /// Group size, refer to grouped convolution in + /// Alex Krizhevsky's paper: when group=2, the first half of the + /// filters are only connected to the first half of the input channels, + /// and the second half only connected to the second half. + size_t groups_; + + inline int strideH() const { return strides_[0]; } + + inline int strideW() const { return strides_[1]; } + + inline int paddingH() const { return paddings_[0]; } + + inline int paddingW() const { return paddings_[1]; } + + // A temporary memory in convolution calculation. + MemoryHandlePtr memory_; + + template + void resizeBuffer(size_t newSize) { + if (!memory_ || newSize * sizeof(real) > memory_->getAllocSize()) { + if (Device == DEVICE_TYPE_CPU) { + memory_ = std::make_shared(newSize * sizeof(real)); + } else { + memory_ = std::make_shared(newSize * sizeof(real)); + } + } + } +}; + +} // namespace paddle diff --git a/paddle/function/ConvOpTest.cpp b/paddle/function/ConvOpTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dfa2f784610b0dd60340e0ebc6a066437f3715eb --- /dev/null +++ b/paddle/function/ConvOpTest.cpp @@ -0,0 +1,210 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include "Function.h" +#include "FunctionTest.h" + +namespace paddle { + +enum TestType { + kForwardTest = 0, + kBackwardInputTest = 1, + kBackwardFilterTest = 2, +}; + +template +class ConvolutionTest { +public: + ConvolutionTest(const std::string& conv1, + const std::string& conv2, + TestType type, + std::string algo = "auto") { + for (size_t batchSize : {1, 32}) { + for (size_t inputSize : {7, 14, 54}) { + for (size_t filterSize : {1, 3, 5}) { + for (size_t inputChannels : {3, 64}) { + for (size_t outputChannels : {3, 64, 128}) { + if (inputChannels < outputChannels) break; + for (size_t stride : {1, 2}) { + for (size_t padding : {0, 1}) { + if (padding >= filterSize) break; + size_t outputSize = + (inputSize - filterSize + 2 * padding + stride) / stride; + VLOG(3) << " batchSize=" << batchSize + << " inputChannels=" << inputChannels + << " inputHeight=" << inputSize + << " inputWidth=" << inputSize + << " outputChannels=" << outputChannels + << " filterHeight=" << filterSize + << " filterWidth=" << filterSize + << " outputHeight=" << outputSize + << " outputWidth=" << outputSize + << " stride=" << stride << " padding=" << padding; + + std::vector paddings = {padding, padding}; + std::vector strides = {stride, stride}; + Compare2Function test( + conv1, + conv2, + FuncConfig() + .set("paddings", paddings) + .set("strides", strides) + .set("groups", (size_t)1) + .set("algo", algo)); + + TensorShape input{ + batchSize, inputChannels, inputSize, inputSize}; + TensorShape filter{ + outputChannels, inputChannels, filterSize, filterSize}; + TensorShape output{ + batchSize, outputChannels, outputSize, outputSize}; + + if (type == kForwardTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.run(); + } else if (type == kBackwardInputTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO); + test.run(); + } else if (type == kBackwardFilterTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.run(); + } + } + } + } + } + } + } + } + } +}; + +// Mainly used to test cases where the height and width (input, filter) +// are not equal. 
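+// Both test classes size the output with the same integer formula,
+// outputSize = (inputSize - filterSize + 2 * padding + stride) / stride;
+// e.g. inputSize = 14, filterSize = 3, padding = 1, stride = 2 gives
+// (14 - 3 + 2 + 2) / 2 = 7.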
+template +class ConvolutionTest2 { +public: + ConvolutionTest2(const std::string& conv1, + const std::string& conv2, + TestType type, + std::string algo = "auto") { + for (size_t batchSize : {16}) { + for (size_t inputHeight : {7, 31}) { + for (size_t inputWidth : {10, 54}) { + for (size_t filterHeight : {1, 5}) { + for (size_t filterWidth : {3, 7}) { + for (size_t inputChannels : {7}) { + for (size_t outputChannels : {32}) { + size_t stride = 1; + size_t padding = 0; + size_t outputHeight = + (inputHeight - filterHeight + 2 * padding + stride) / + stride; + size_t outputWidth = + (inputWidth - filterWidth + 2 * padding + stride) / + stride; + VLOG(3) << " batchSize=" << batchSize + << " inputChannels=" << inputChannels + << " inputHeight=" << inputHeight + << " inputWidth=" << inputWidth + << " outputChannels=" << outputChannels + << " filterHeight=" << filterHeight + << " filterWidth=" << filterWidth + << " outputHeight=" << outputHeight + << " outputWidth=" << outputWidth + << " stride=" << stride << " padding=" << padding; + + std::vector paddings = {padding, padding}; + std::vector strides = {stride, stride}; + Compare2Function test( + conv1, + conv2, + FuncConfig() + .set("paddings", paddings) + .set("strides", strides) + .set("groups", (size_t)1) + .set("algo", algo)); + + TensorShape input{ + batchSize, inputChannels, inputHeight, inputWidth}; + TensorShape filter{ + outputChannels, inputChannels, filterHeight, filterWidth}; + TensorShape output{ + batchSize, outputChannels, outputHeight, outputWidth}; + + if (type == kForwardTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.run(); + } else if (type == kBackwardInputTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO); + test.run(); + } else if (type == kBackwardFilterTest) { + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output)); + test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input)); + test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter)); + test.run(); + } + } + } + } + } + } + } + } + } +}; + +TEST(Forward, GEMM) { + ConvolutionTest test( + "NaiveConv-CPU", "GemmConv-CPU", kForwardTest); + ConvolutionTest2 test2( + "NaiveConv-CPU", "GemmConv-CPU", kForwardTest); +} + +#ifndef PADDLE_ONLY_CPU +TEST(Forward, GEMM2) { + ConvolutionTest test( + "GemmConv-CPU", "GemmConv-GPU", kForwardTest); + ConvolutionTest2 test2( + "GemmConv-CPU", "GemmConv-GPU", kForwardTest); +} + +TEST(BackwardInput, GEMM) { + ConvolutionTest test( + "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest); + ConvolutionTest2 test2( + "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest); +} + +TEST(BackwardFilter, GEMM) { + ConvolutionTest test( + "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest); + ConvolutionTest2 test2( + "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest); +} +#endif + +} // namespace paddle diff --git a/paddle/function/CosSimOpTest.cpp b/paddle/function/CosSimOpTest.cpp index 48c815f027161b48c17ce654ab819156fd856199..f6c0041101f50f8f47d45e0fe0fe1064e0f9cb69 100644 --- a/paddle/function/CosSimOpTest.cpp +++ b/paddle/function/CosSimOpTest.cpp @@ -22,7 +22,7 @@ void testCosSimForward(size_t height_x, size_t height_y, size_t width, real scale) { - FunctionCompare test("CosSimForward", FuncConfig().set("scale", scale)); 
+ CpuGpuFuncCompare test("CosSimForward", FuncConfig().set("scale", scale)); // prepare input arguments test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, width})); test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_y, width})); @@ -36,7 +36,7 @@ void testCosSimBackward(size_t height_x, size_t height_y, size_t width, real scale) { - FunctionCompare test("CosSimBackward", FuncConfig().set("scale", scale)); + CpuGpuFuncCompare test("CosSimBackward", FuncConfig().set("scale", scale)); // prepare input arguments test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1})); test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{height_x, 1})); diff --git a/paddle/function/CrossMapNormalOpTest.cpp b/paddle/function/CrossMapNormalOpTest.cpp index 51f5da81bfc9ae870ac9949ba74da01a9449a04d..ed17b17da616db9d52318f21c133458d698b0dd8 100644 --- a/paddle/function/CrossMapNormalOpTest.cpp +++ b/paddle/function/CrossMapNormalOpTest.cpp @@ -28,11 +28,11 @@ TEST(CrossMapNormal, real) { << " size=" << size; // init Test object - FunctionCompare test("CrossMapNormal", - FuncConfig() - .set("size", size) - .set("scale", (real)1.5) - .set("pow", (real)0.5)); + CpuGpuFuncCompare test("CrossMapNormal", + FuncConfig() + .set("size", size) + .set("scale", (real)1.5) + .set("pow", (real)0.5)); // prepare input arguments TensorShape shape{numSamples, channels, imgSizeH, imgSizeW}; test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape)); @@ -57,11 +57,11 @@ TEST(CrossMapNormalGrad, real) { << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW << " size=" << size; - FunctionCompare test("CrossMapNormalGrad", - FuncConfig() - .set("size", size) - .set("scale", (real)1.5) - .set("pow", (real)0.5)); + CpuGpuFuncCompare test("CrossMapNormalGrad", + FuncConfig() + .set("size", size) + .set("scale", (real)1.5) + .set("pow", (real)0.5)); TensorShape shape{numSamples, channels, imgSizeH, imgSizeW}; test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape)); test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape)); diff --git a/paddle/function/FunctionTest.h b/paddle/function/FunctionTest.h index 0cfafdb27f55a3e6617d31a968d2a05fc77f5b46..ba446bf92da264fafa1fb47a2c30da9cb13176ce 100644 --- a/paddle/function/FunctionTest.h +++ b/paddle/function/FunctionTest.h @@ -22,14 +22,62 @@ namespace paddle { typedef std::shared_ptr BufferArgPtr; +namespace test { +template +struct Allocator; + +template <> +struct Allocator { + using type = CpuMemoryHandle; +}; + +template <> +struct Allocator { + using type = GpuMemoryHandle; +}; + +// Copy argument1 to argument2 +template +class CopyArgument { +public: + void operator()(const BufferArg& arg1, BufferArg& arg2) { + CHECK_EQ(arg1.valueType(), arg2.valueType()); + CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements()); + + if (arg1.valueType() == VALUE_TYPE_INT32) { + IVectorPtr vector1 = + IVector::create((int*)arg1.data(), + arg1.shape().getElements(), + DType1 == DEVICE_TYPE_CPU ? false : true); + IVectorPtr vector2 = + IVector::create((int*)arg2.data(), + arg2.shape().getElements(), + DType2 == DEVICE_TYPE_CPU ? false : true); + vector2->copyFrom(*vector1); + } else { + VectorPtr vector1 = + Vector::create((real*)arg1.data(), + arg1.shape().getElements(), + DType1 == DEVICE_TYPE_CPU ? false : true); + VectorPtr vector2 = + Vector::create((real*)arg2.data(), + arg2.shape().getElements(), + DType2 == DEVICE_TYPE_CPU ? 
false : true); + vector2->copyFrom(*vector1); + } + } +}; +} // namespace test + /** - * \brief A class for comparing CPU and GPU implementations of Function. - * + * \brief A class for comparing two Functions of different implementations. + * For example, can be used to compare the CPU and GPU implementation + * of the function is consistent. * * Use case: * // Initializes a test object, the corresponding cpu and gpu Function * // are constructed according to FunctionName and FuncConfig. - * FunctionCompare test(FunctionName, FuncConfig); + * CpuGpuFuncCompare test(FunctionName, FuncConfig); * // Prepare inputs and outputs arguments. * // Here the input and output can not contain real data, * // only contains the argument type and shape. @@ -45,28 +93,38 @@ typedef std::shared_ptr BufferArgPtr; * // Compares CPU and GPU calculation results for consistency. * test.run(); */ -class FunctionCompare { +template +class Compare2Function { public: - FunctionCompare(const std::string& name, const FuncConfig& config) - : cpuFunc_(FunctionBase::funcRegistrar_.createByType(name + "-CPU")), - gpuFunc_(FunctionBase::funcRegistrar_.createByType(name + "-GPU")) { - cpuFunc_->init(config); - gpuFunc_->init(config); + typedef typename test::Allocator::type Allocator1; + typedef typename test::Allocator::type Allocator2; + typedef typename Tensor::Vector Vector1; + typedef typename Tensor::Vector Vector2; + typedef typename Tensor::SparseMatrix SparseMatrix1; + typedef typename Tensor::SparseMatrix SparseMatrix2; + + Compare2Function(const std::string& name1, + const std::string& name2, + const FuncConfig& config) + : function1_(FunctionBase::funcRegistrar_.createByType(name1)), + function2_(FunctionBase::funcRegistrar_.createByType(name2)) { + function1_->init(config); + function2_->init(config); } - ~FunctionCompare() {} + ~Compare2Function() {} // input need only contains shape, do not contains data. 
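+  // Buffers are always allocated in pairs, one per compared implementation;
+  // initInputs()/initOutputs() later fill the first implementation's buffers
+  // with uniform random data and mirror them into the second via copyArg_,
+  // so both functions compute on identical data before their outputs are
+  // compared.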
void addInputs(const BufferArg& input) { size_t size = input.shape().getElements() * sizeOfValuType(input.valueType()); - cpuMemory_.emplace_back(std::make_shared(size)); - gpuMemory_.emplace_back(std::make_shared(size)); + func1Memory_.emplace_back(std::make_shared(size)); + func2Memory_.emplace_back(std::make_shared(size)); - cpuInputs_.emplace_back(std::make_shared( - cpuMemory_.back()->getBuf(), input.valueType(), input.shape())); - gpuInputs_.emplace_back(std::make_shared( - gpuMemory_.back()->getBuf(), input.valueType(), input.shape())); + func1Inputs_.emplace_back(std::make_shared( + func1Memory_.back()->getBuf(), input.valueType(), input.shape())); + func2Inputs_.emplace_back(std::make_shared( + func2Memory_.back()->getBuf(), input.valueType(), input.shape())); } // assume one copy of sequence is shared by different SequenceArgs @@ -75,62 +133,57 @@ public: size_t batchSize = input.shape()[0]; size_t numSeqs = batchSize / 10 + 1; size_t sizeId = (numSeqs + 1) * sizeOfValuType(VALUE_TYPE_INT32); - cpuMemory_.emplace_back(std::make_shared(sizeId)); - gpuMemory_.emplace_back(std::make_shared(sizeId)); - cpuSeq_ = std::make_shared(cpuMemory_.back()->getBuf(), - TensorShape{numSeqs + 1}); - gpuSeq_ = std::make_shared(gpuMemory_.back()->getBuf(), - TensorShape{numSeqs + 1}); + func1Memory_.emplace_back(std::make_shared(sizeId)); + func2Memory_.emplace_back(std::make_shared(sizeId)); + seq1_ = std::make_shared(func1Memory_.back()->getBuf(), + TensorShape{numSeqs + 1}); + seq2_ = std::make_shared(func2Memory_.back()->getBuf(), + TensorShape{numSeqs + 1}); /// init sequence Id - initArg(*cpuSeq_, batchSize); + initArg(*seq1_, batchSize); - // todo(tianbing), delete it - CHECK_EQ(cpuSeq_->shape().getElements(), cpuSeq_->numSeqs() + 1); - - CpuIVector cpuSeq(cpuSeq_->shape().getElements(), (int*)cpuSeq_->data()); - GpuIVector gpuSeq(gpuSeq_->shape().getElements(), (int*)gpuSeq_->data()); - gpuSeq.copyFrom(cpuSeq); + copyArg_(*seq1_, *seq2_); } void addInputs(const SequenceArg& input) { CHECK_EQ(input.shape().ndims(), 2UL); size_t batchSize = input.shape()[0]; - if (!cpuSeq_ || !gpuSeq_) { // sequence not exist + if (!seq1_ || !seq2_) { // sequence not exist addSequence(SequenceIdArg(TensorShape{batchSize})); } size_t size = input.shape().getElements() * sizeOfValuType(input.valueType()); - cpuMemory_.emplace_back(std::make_shared(size)); - gpuMemory_.emplace_back(std::make_shared(size)); + func1Memory_.emplace_back(std::make_shared(size)); + func2Memory_.emplace_back(std::make_shared(size)); /// SequenceArg - cpuInputs_.emplace_back( - std::make_shared(cpuMemory_.back()->getBuf(), + func1Inputs_.emplace_back( + std::make_shared(func1Memory_.back()->getBuf(), input.valueType(), input.shape(), - *cpuSeq_)); - gpuInputs_.emplace_back( - std::make_shared(gpuMemory_.back()->getBuf(), + *seq1_)); + func2Inputs_.emplace_back( + std::make_shared(func2Memory_.back()->getBuf(), input.valueType(), input.shape(), - *gpuSeq_)); + *seq2_)); } // output need only contains shape, do not contains data. 
void addOutputs(const BufferArg& output, ArgType argType = ASSIGN_TO) { size_t size = output.shape().getElements() * sizeOfValuType(output.valueType()); - cpuMemory_.emplace_back(std::make_shared(size)); - gpuMemory_.emplace_back(std::make_shared(size)); + func1Memory_.emplace_back(std::make_shared(size)); + func2Memory_.emplace_back(std::make_shared(size)); - cpuOutputs_.emplace_back( - std::make_shared(cpuMemory_.back()->getBuf(), + func1Outputs_.emplace_back( + std::make_shared(func1Memory_.back()->getBuf(), output.valueType(), output.shape(), argType)); - gpuOutputs_.emplace_back( - std::make_shared(gpuMemory_.back()->getBuf(), + func2Outputs_.emplace_back( + std::make_shared(func2Memory_.back()->getBuf(), output.valueType(), output.shape(), argType)); @@ -138,14 +191,14 @@ public: /// add and init output sparse matrix void addOutputs(const SparseMatrixArg& output, ArgType argType = ASSIGN_TO) { - cpuSparse_ = std::make_shared( + sparse1_ = std::make_shared( output.shape()[0], output.shape()[1], output.nnz(), static_cast(output.dataType()), static_cast(output.dataFormat())); - gpuSparse_ = std::make_shared( + sparse2_ = std::make_shared( output.shape()[0], output.shape()[1], output.nnz(), @@ -154,52 +207,52 @@ public: /// init sparse matrix hl_stream_t stream(HPPL_STREAM_1); - cpuSparse_->randomizeUniform(); - gpuSparse_->copyFrom(*cpuSparse_, stream); + sparse1_->randomizeUniform(); + sparse2_->copyFrom(*sparse1_, stream); hl_stream_synchronize(stream); - cpuOutputs_.emplace_back( - std::make_shared(*cpuSparse_, argType)); - gpuOutputs_.emplace_back( - std::make_shared(*gpuSparse_, argType)); + func1Outputs_.emplace_back( + std::make_shared(*sparse1_, argType)); + func2Outputs_.emplace_back( + std::make_shared(*sparse2_, argType)); } void addOutputs(const SequenceArg& output, ArgType argType = ASSIGN_TO) { CHECK_EQ(output.shape().ndims(), 2UL); size_t batchSize = output.shape()[0]; - if (!cpuSeq_ || !gpuSeq_) { // sequence not exist + if (!seq1_ || !seq2_) { // sequence not exist addSequence(SequenceIdArg(TensorShape{batchSize})); } size_t size = output.shape().getElements() * sizeOfValuType(output.valueType()); - cpuMemory_.emplace_back(std::make_shared(size)); - gpuMemory_.emplace_back(std::make_shared(size)); + func1Memory_.emplace_back(std::make_shared(size)); + func2Memory_.emplace_back(std::make_shared(size)); /// SequenceArg - cpuOutputs_.emplace_back( - std::make_shared(cpuMemory_.back()->getBuf(), + func1Outputs_.emplace_back( + std::make_shared(func1Memory_.back()->getBuf(), output.valueType(), output.shape(), - *cpuSeq_, + *seq1_, argType)); - gpuOutputs_.emplace_back( - std::make_shared(gpuMemory_.back()->getBuf(), + func2Outputs_.emplace_back( + std::make_shared(func2Memory_.back()->getBuf(), output.valueType(), output.shape(), - *gpuSeq_, + *seq2_, argType)); } void addInputs(const SparseMatrixArg& input) { - cpuSparse_ = std::make_shared( + sparse1_ = std::make_shared( input.shape()[0], input.shape()[1], input.nnz(), static_cast(input.dataType()), static_cast(input.dataFormat())); - gpuSparse_ = std::make_shared( + sparse2_ = std::make_shared( input.shape()[0], input.shape()[1], input.nnz(), @@ -208,12 +261,12 @@ public: /// init sparse matrix hl_stream_t stream(HPPL_STREAM_1); - cpuSparse_->randomizeUniform(); - gpuSparse_->copyFrom(*cpuSparse_, stream); + sparse1_->randomizeUniform(); + sparse2_->copyFrom(*sparse1_, stream); hl_stream_synchronize(stream); - cpuInputs_.emplace_back(std::make_shared(*cpuSparse_)); - 
gpuInputs_.emplace_back(std::make_shared(*gpuSparse_)); + func1Inputs_.emplace_back(std::make_shared(*sparse1_)); + func2Inputs_.emplace_back(std::make_shared(*sparse2_)); } void run() { @@ -236,27 +289,27 @@ public: function->calc(inArgs, outArgs); }; - callFunction(cpuFunc_.get(), cpuInputs_, cpuOutputs_); - callFunction(gpuFunc_.get(), gpuInputs_, gpuOutputs_); + callFunction(function1_.get(), func1Inputs_, func1Outputs_); + callFunction(function2_.get(), func2Inputs_, func2Outputs_); // check outputs compareOutputs(); } - std::shared_ptr getCpuFunction() const { return cpuFunc_; } + std::shared_ptr getFunction1() const { return function1_; } - std::shared_ptr getGpuFunction() const { return gpuFunc_; } + std::shared_ptr getFunction2() const { return function2_; } protected: // only init cpu argument, gpu argument copy from cpu argument. void initArg(BufferArg& arg) { - CpuVector vector(arg.shape().getElements(), (real*)arg.data()); + Vector1 vector(arg.shape().getElements(), (real*)arg.data()); vector.uniform(0.001, 1); } void initArg(SequenceArg& arg) { /// init only matrix - CpuVector vector(arg.shape().getElements(), (real*)arg.data()); + Vector1 vector(arg.shape().getElements(), (real*)arg.data()); vector.uniform(0.001, 1); } @@ -276,73 +329,72 @@ protected: } void initInputs() { - for (size_t i = 0; i < cpuInputs_.size(); i++) { - if (cpuInputs_[i]->isSparseArg()) { + for (size_t i = 0; i < func1Inputs_.size(); i++) { + if (func1Inputs_[i]->isSparseArg()) { continue; /// sparse matrix already init } - if (cpuInputs_[i]->isSequenceArg()) { - initArg(dynamic_cast(*cpuInputs_[i])); + if (func1Inputs_[i]->isSequenceArg()) { + initArg(dynamic_cast(*func1Inputs_[i])); } else { - initArg(*cpuInputs_[i]); + initArg(*func1Inputs_[i]); } - // TODO: Need a BufferCopy used to copy from one BufferArg to another. - CpuVector cpuVector(cpuInputs_[i]->shape().getElements(), - (real*)cpuInputs_[i]->data()); - GpuVector gpuVector(gpuInputs_[i]->shape().getElements(), - (real*)gpuInputs_[i]->data()); - gpuVector.copyFrom(cpuVector); + copyArg_(*func1Inputs_[i], *func2Inputs_[i]); } } void initOutputs() { - for (size_t i = 0; i < cpuOutputs_.size(); i++) { - if (cpuOutputs_[i]->isSparseArg()) { + for (size_t i = 0; i < func1Outputs_.size(); i++) { + if (func1Outputs_[i]->isSparseArg()) { continue; /// sparse matrix already init } - if (cpuOutputs_[i]->isSequenceArg()) { - initArg(dynamic_cast(*cpuOutputs_[i])); + if (func1Outputs_[i]->isSequenceArg()) { + initArg(dynamic_cast(*func1Outputs_[i])); } else { - initArg(*cpuOutputs_[i]); + initArg(*func1Outputs_[i]); } - // TODO: Need a BufferCopy used to copy from one BufferArg to another. - CpuVector cpuVector(cpuOutputs_[i]->shape().getElements(), - (real*)cpuOutputs_[i]->data()); - GpuVector gpuVector(gpuOutputs_[i]->shape().getElements(), - (real*)gpuOutputs_[i]->data()); - - gpuVector.copyFrom(cpuVector); + copyArg_(*func1Outputs_[i], *func2Outputs_[i]); } } void compareOutputs() { - for (size_t i = 0; i < cpuOutputs_.size(); i++) { + for (size_t i = 0; i < func1Outputs_.size(); i++) { // TODO, Need a BufferCheck used to compare the two buffers. 
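// For now the check copies each pair of outputs into Vector1/Vector2
// (e.g. a CpuVector and a GpuVector for CpuGpuFuncCompare) and relies on
// autotest::TensorCheckErr to report element-wise mismatches.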
- const auto cpu = cpuOutputs_[i]; - const auto gpu = gpuOutputs_[i]; + const auto cpu = func1Outputs_[i]; + const auto gpu = func2Outputs_[i]; CHECK_EQ(cpu->numElements(), gpu->numElements()); - CpuVector cpuVector(cpu->numElements(), (real*)cpu->data()); - GpuVector gpuVector(gpu->numElements(), (real*)gpu->data()); + Vector1 cpuVector(cpu->numElements(), (real*)cpu->data()); + Vector2 gpuVector(gpu->numElements(), (real*)gpu->data()); autotest::TensorCheckErr(cpuVector, gpuVector); } } protected: - std::shared_ptr cpuFunc_; - std::shared_ptr gpuFunc_; - std::vector cpuMemory_; - std::vector gpuMemory_; - std::vector cpuInputs_; - std::vector cpuOutputs_; - std::vector gpuInputs_; - std::vector gpuOutputs_; - std::shared_ptr cpuSparse_; - std::shared_ptr gpuSparse_; - std::shared_ptr cpuSeq_; - std::shared_ptr gpuSeq_; + std::shared_ptr function1_; + std::shared_ptr function2_; + std::vector> func1Memory_; + std::vector> func2Memory_; + std::vector func1Inputs_; + std::vector func1Outputs_; + std::vector func2Inputs_; + std::vector func2Outputs_; + std::shared_ptr sparse1_; + std::shared_ptr sparse2_; + std::shared_ptr seq1_; + std::shared_ptr seq2_; + test::CopyArgument copyArg_; +}; + +class CpuGpuFuncCompare + : public Compare2Function { +public: + CpuGpuFuncCompare(const std::string& name, const FuncConfig& config) + : Compare2Function(name + "-CPU", name + "-GPU", config) {} + + ~CpuGpuFuncCompare() {} }; } // namespace paddle diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c7a57801ed6098260af5ba22be82ac4ea7c2e601 --- /dev/null +++ b/paddle/function/GemmConvOp.cpp @@ -0,0 +1,386 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "GemmConvOp.h" +#include "GemmFunctor.h" +#include "paddle/math/MemoryHandle.h" + +namespace paddle { + +/* + * imData = [input_channels, input_height, input_width] + * colData = [input_channels, filter_height, filter_width, + * output_height, output_width] + */ +template +class Im2ColFunctor { +public: + void operator()(const T* imData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* colData) { + int channelsCol = inputChannels * filterHeight * filterWidth; + + for (int c = 0; c < channelsCol; ++c) { + int wOffset = c % filterWidth; + int hOffset = (c / filterWidth) % filterHeight; + int c_im = c / filterWidth / filterHeight; + for (int h = 0; h < outputHeight; ++h) { + for (int w = 0; w < outputWidth; ++w) { + int imRowIdx = h * strideHeight + hOffset; + int imColIdx = w * strideWidth + wOffset; + if ((imRowIdx - paddingHeight) < 0 || + (imRowIdx - paddingHeight) >= inputHeight || + (imColIdx - paddingWidth) < 0 || + (imColIdx - paddingWidth) >= inputWidth) { + colData[(c * outputHeight + h) * outputWidth + w] = T(0); + } else { + imRowIdx += c_im * inputHeight - paddingHeight; + imColIdx -= paddingWidth; + colData[(c * outputHeight + h) * outputWidth + w] = + imData[imRowIdx * inputWidth + imColIdx]; + } + } + } + } + } +}; + +template +class Col2ImFunctor { +public: + void operator()(const T* colData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* imData) { + int channelsCol = inputChannels * filterHeight * filterWidth; + + for (int c = 0; c < channelsCol; ++c) { + int wOffset = c % filterWidth; + int hOffset = (c / filterWidth) % filterHeight; + int c_im = c / filterWidth / filterHeight; + for (int h = 0; h < outputHeight; ++h) { + for (int w = 0; w < outputWidth; ++w) { + int imRowIdx = h * strideHeight + hOffset; + int imColIdx = w * strideWidth + wOffset; + if ((imRowIdx - paddingHeight) >= 0 && + (imRowIdx - paddingHeight) < inputHeight && + (imColIdx - paddingWidth) >= 0 && + (imColIdx - paddingWidth) < inputWidth) { + imRowIdx += c_im * inputHeight - paddingHeight; + imColIdx -= paddingWidth; + imData[imRowIdx * inputWidth + imColIdx] += + colData[(c * outputHeight + h) * outputWidth + w]; + } + } + } + } + } +}; + +/* + * \brief Forward calculation of convolution. + */ +template +class GemmConvFunction : public ConvFunctionBase { +public: + void init(const FuncConfig& config) override { + ConvFunctionBase::init(config); + } + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(numInputs_, inputs.size()); + CHECK_EQ(numOutputs_, outputs.size()); + // TODO(hedaoyuan): Need to define some index macros, + // to avoid useing 0 and 1. 
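// The forward pass is computed as im2col followed by a GEMM, one
// (sample, group) pair at a time:
//   colData [inC/g * fH * fW, outH * outW]  = im2col(input slice)
//   output  [outC/g, outH * outW]          += filter [outC/g, inC/g * fH * fW] * colData
// beta selects between ASSIGN_TO (beta = 0) and ADD_TO (beta = 1) below.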
+ const TensorShape& input = inputs[0].shape(); + const TensorShape& filter = inputs[1].shape(); + const TensorShape& output = outputs[0].shape(); + check(input, filter, output); + + real beta; + if (outputs[0].getArgType() == ADD_TO) { + beta = 1.0; + } else { + beta = 0.0; + } + + size_t batchSize = input[0]; + size_t inputChannels = input[1]; + size_t inputHeight = input[2]; + size_t inputWidth = input[3]; + size_t filterHeight = getFilterHeight(filter); + size_t filterWidth = getFilterWidth(filter); + size_t outputChannels = output[1]; + size_t outputHeight = output[2]; + size_t outputWidth = output[3]; + + real* inputData = inputs[0].data(); + real* filterData = inputs[1].data(); + real* outputData = outputs[0].data(); + + size_t size = inputChannels / groups_ * filterHeight * filterWidth * + outputHeight * outputWidth; + resizeBuffer(size); + real* colData = reinterpret_cast(memory_->getBuf()); + + Im2ColFunctor im2col; + GemmFunctor gemm; + size_t inputOffset = (inputChannels / groups_) * inputHeight * inputWidth; + size_t outputOffset = + (outputChannels / groups_) * outputHeight * outputWidth; + size_t filterOffset = filter.getElements() / groups_; + + for (size_t i = 0; i < batchSize; i++) { + for (size_t g = 0; g < groups_; g++) { + im2col(inputData + g * inputOffset, + inputChannels / groups_, + inputHeight, + inputWidth, + filterHeight, + filterWidth, + strideH(), + strideW(), + paddingH(), + paddingW(), + outputHeight, + outputWidth, + colData); + + int M = outputChannels / groups_; + int N = outputHeight * outputWidth; + int K = inputChannels / groups_ * filterHeight * filterWidth; + gemm(CblasNoTrans, + CblasNoTrans, + M, + N, + K, + 1.0f, + filterData + g * filterOffset, + K, + colData, + N, + beta, + outputData + g * outputOffset, + N); + } + inputData += inputChannels * inputHeight * inputWidth; + outputData += outputChannels * outputHeight * outputWidth; + } + } +}; + +/* + * \brief Backward input calculation of convolution. + */ +template +class GemmConvGradInputFunction : public ConvFunctionBase { +public: + void init(const FuncConfig& config) override { + ConvFunctionBase::init(config); + } + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(numInputs_, inputs.size()); + CHECK_EQ(numOutputs_, outputs.size()); + // Since the implementation of Col2ImFunctor is ADD_TO, + // this function only supports ADD_TO mode. 
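// The input gradient is reconstructed per (sample, group) pair as
//   colData    = filter^T * outputGrad     (GEMM with the filter transposed)
//   inputGrad += col2im(colData)           (scatter-add back to image layout)
// so the result is always accumulated into outputs[0].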
+ CHECK_EQ(outputs[0].getArgType(), ADD_TO); + const TensorShape& output = inputs[0].shape(); + const TensorShape& filter = inputs[1].shape(); + const TensorShape& input = outputs[0].shape(); + check(input, filter, output); + + size_t batchSize = input[0]; + size_t inputChannels = input[1]; + size_t inputHeight = input[2]; + size_t inputWidth = input[3]; + size_t filterHeight = getFilterHeight(filter); + size_t filterWidth = getFilterWidth(filter); + size_t outputChannels = output[1]; + size_t outputHeight = output[2]; + size_t outputWidth = output[3]; + + real* outputGrad = inputs[0].data(); + real* filterData = inputs[1].data(); + real* inputGrad = outputs[0].data(); + + size_t size = inputChannels / groups_ * filterHeight * filterWidth * + outputHeight * outputWidth; + resizeBuffer(size); + real* colData = reinterpret_cast(memory_->getBuf()); + + Col2ImFunctor col2im; + GemmFunctor gemm; + size_t inputOffset = (inputChannels / groups_) * inputHeight * inputWidth; + size_t outputOffset = + (outputChannels / groups_) * outputHeight * outputWidth; + size_t filterOffset = filter.getElements() / groups_; + + for (size_t i = 0; i < batchSize; i++) { + for (size_t g = 0; g < groups_; g++) { + int K = outputChannels / groups_; + int N = outputHeight * outputWidth; + int M = inputChannels / groups_ * filterHeight * filterWidth; + gemm(CblasTrans, + CblasNoTrans, + M, + N, + K, + 1.0f, + filterData + g * filterOffset, + M, + outputGrad + g * outputOffset, + N, + 0.0f, + colData, + N); + + col2im(colData, + inputChannels / groups_, + inputHeight, + inputWidth, + filterHeight, + filterWidth, + strideH(), + strideW(), + paddingH(), + paddingW(), + outputHeight, + outputWidth, + inputGrad + g * inputOffset); + } + inputGrad += inputChannels * inputHeight * inputWidth; + outputGrad += outputChannels * outputHeight * outputWidth; + } + } +}; + +/* + * \brief Backward filter calculation of convolution. 
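 * Computed as filterGrad += outputGrad * im2col(input)^T for each
 * (sample, group) pair, accumulating over the batch.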
+ */ +template +class GemmConvGradFilterFunction : public ConvFunctionBase { +public: + void init(const FuncConfig& config) override { + ConvFunctionBase::init(config); + } + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(numInputs_, inputs.size()); + CHECK_EQ(numOutputs_, outputs.size()); + const TensorShape& output = inputs[0].shape(); + const TensorShape& input = inputs[1].shape(); + const TensorShape& filter = outputs[0].shape(); + check(input, filter, output); + + real beta; + if (outputs[0].getArgType() == ADD_TO) { + beta = 1.0; + } else { + beta = 0.0; + } + + size_t batchSize = input[0]; + size_t inputChannels = input[1]; + size_t inputHeight = input[2]; + size_t inputWidth = input[3]; + size_t filterHeight = getFilterHeight(filter); + size_t filterWidth = getFilterWidth(filter); + size_t outputChannels = output[1]; + size_t outputHeight = output[2]; + size_t outputWidth = output[3]; + + real* outputGrad = inputs[0].data(); + real* inputData = inputs[1].data(); + real* filterGrad = outputs[0].data(); + + size_t size = inputChannels / groups_ * filterHeight * filterWidth * + outputHeight * outputWidth; + resizeBuffer(size); + real* colData = reinterpret_cast(memory_->getBuf()); + + Im2ColFunctor im2col; + GemmFunctor gemm; + size_t inputOffset = (inputChannels / groups_) * inputHeight * inputWidth; + size_t outputOffset = + (outputChannels / groups_) * outputHeight * outputWidth; + size_t filterOffset = filter.getElements() / groups_; + for (size_t i = 0; i < batchSize; i++) { + for (size_t g = 0; g < groups_; g++) { + im2col(inputData + g * inputOffset, + inputChannels / groups_, + inputHeight, + inputWidth, + filterHeight, + filterWidth, + strideH(), + strideW(), + paddingH(), + paddingW(), + outputHeight, + outputWidth, + colData); + + int M = outputChannels / groups_; + int K = outputHeight * outputWidth; + int N = inputChannels / groups_ * filterHeight * filterWidth; + gemm(CblasNoTrans, + CblasTrans, + M, + N, + K, + 1.0f, + outputGrad + g * outputOffset, + K, + colData, + K, + i == 0 ? beta : 1.0f, + filterGrad + g * filterOffset, + N); + } + inputData += inputChannels * inputHeight * inputWidth; + outputGrad += outputChannels * outputHeight * outputWidth; + } + } +}; + +REGISTER_TYPED_FUNC(GemmConv, CPU, GemmConvFunction); +REGISTER_TYPED_FUNC(GemmConvGradInput, CPU, GemmConvGradInputFunction); +REGISTER_TYPED_FUNC(GemmConvGradFilter, CPU, GemmConvGradFilterFunction); +#ifndef PADDLE_ONLY_CPU +REGISTER_TYPED_FUNC(GemmConv, GPU, GemmConvFunction); +REGISTER_TYPED_FUNC(GemmConvGradInput, GPU, GemmConvGradInputFunction); +REGISTER_TYPED_FUNC(GemmConvGradFilter, GPU, GemmConvGradFilterFunction); +#endif + +} // namespace paddle diff --git a/paddle/function/GemmConvOp.h b/paddle/function/GemmConvOp.h new file mode 100644 index 0000000000000000000000000000000000000000..9f11cce597a07ce2a54f518be30b657c26ab7516 --- /dev/null +++ b/paddle/function/GemmConvOp.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "ConvOp.h" + +namespace paddle { + +/* + * imData = [input_channels, input_height, input_width] + * colData = [input_channels, filter_height, filter_width, + * output_height, output_width] + */ +template +class Im2ColFunctor { +public: + void operator()(const T* imData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* colData); +}; + +template +class Col2ImFunctor { +public: + void operator()(const T* colData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* imData); +}; + +} // namespace paddle diff --git a/paddle/function/GemmConvOpGpu.cu b/paddle/function/GemmConvOpGpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..2a1795ff0fb5643ea436c94fe893fe866056fccb --- /dev/null +++ b/paddle/function/GemmConvOpGpu.cu @@ -0,0 +1,186 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "ConvOp.h" +#include "GemmConvOp.h" + +namespace paddle { + +template +__global__ +void im2col(const T* data_im, int numOuts, int height, int width, + int blockH, int blockW, + int strideH, int strideW, + int paddingH, int paddingW, + int height_col, int width_col, + T* data_col) { + int index = + (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + if (index < numOuts) { + int w_out = index % width_col; + index /= width_col; + int h_out = index % height_col; + int channel_in = index / height_col; + int channel_out = channel_in * blockH * blockW; + int h_in = h_out * strideH; + int w_in = w_out * strideW; + + data_col += (channel_out * height_col + h_out) * width_col + w_out; + for (int i = 0; i < blockH; ++i) { + for (int j = 0; j < blockW; ++j) { + int rIdx = int(h_in+i); + int cIdx = int(w_in+j); + if ((rIdx-(int)paddingH) >= (int)height || + (rIdx-(int)paddingH) < 0 || + (cIdx-(int)paddingW) >= (int)width || + (cIdx-(int)paddingW) < 0) { + *data_col = 0; + } else { + rIdx = rIdx + channel_in*height - paddingH; + cIdx = cIdx - paddingW; + *data_col = data_im[rIdx* width + cIdx]; + } + data_col += height_col * width_col; + } + } + } +} + +template +class Im2ColFunctor { +public: + void operator()(const T* imData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* colData) { + int numKernels = inputChannels * outputHeight * outputWidth; + int blocks = (numKernels + 1024 -1) / 1024; + int blockX = 512; + int blockY = (blocks + 512 - 1) / 512; + dim3 threads(1024, 1); + dim3 grid(blockX, blockY); + im2col<<< grid, threads, 0, STREAM_DEFAULT >>> + (imData, numKernels, inputHeight, inputWidth, filterHeight, filterWidth, + strideHeight, strideWidth, paddingHeight, paddingWidth, + outputHeight, outputWidth, colData); + CHECK_SYNC("Im2ColFunctor GPU failed"); + } +}; + +template +__global__ +void col2im(size_t n, const T* data_col, size_t height, + size_t width, size_t channels, + size_t blockH, size_t blockW, + size_t strideH, size_t strideW, + size_t paddingH, size_t paddingW, + size_t height_col, size_t width_col, + T* data_im) { + size_t index = + (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + if (index < n) { + T val = 0; + int w = int(index % width); + int h = int((index / width) % height); + int c = int(index / (width * height)); + if ((w - (int)paddingW) >= 0 && + (w - (int)paddingW) < (width-2 * paddingW) && + (h - (int)paddingH) >= 0 && + (h - paddingH) < (height - 2 * paddingH)) { + // compute the start and end of the output + int w_col_start = + (w < (int)blockW) ? 0 : (w - int(blockW)) / (int)strideW + 1; + int w_col_end = + min((int)(w / (int)strideW + 1), (int)(width_col)); + int h_col_start = + (h < (int)blockH) ? 
0 : (h - (int)blockH) / (int)strideH + 1; + int h_col_end = min(int(h / strideH + 1), int(height_col)); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + // the col location: [c * width * height + h_out, w_out] + int c_col = int(c * blockH* blockW) + \ + (h - h_col * (int)strideH) * (int)blockW + + (w - w_col * (int)strideW); + val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + } + } + h -= paddingH; + w -= paddingW; + data_im[c*((width-2*paddingW) * (height-2*paddingH)) + + h*(width-2*paddingW) + w] += val; + } + } +} + +template +class Col2ImFunctor { +public: + void operator()(const T* colData, + int inputChannels, + int inputHeight, + int inputWidth, + int filterHeight, + int filterWidth, + int strideHeight, + int strideWidth, + int paddingHeight, + int paddingWidth, + int outputHeight, + int outputWidth, + T* imData) { + size_t numKernels = inputChannels * (inputHeight + 2*paddingHeight) + * (inputWidth + 2*paddingWidth); + + size_t blocks = (numKernels + 1024 -1) / 1024; + size_t blockX = 512; + size_t blockY = (blocks+512-1)/512; + dim3 threads(1024, 1); + dim3 grid(blockX, blockY); + + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. + col2im<<< grid, threads, 0, STREAM_DEFAULT >>> + (numKernels, + colData, + inputHeight + 2*paddingHeight, + inputWidth + 2*paddingWidth, + inputChannels, + filterHeight, + filterWidth, + strideHeight, + strideWidth, + paddingHeight, + paddingWidth, + outputHeight, + outputWidth, + imData); + CHECK_SYNC("Col2ImFunctor GPU failed"); + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +} // namespace paddle diff --git a/paddle/function/GemmFunctor.h b/paddle/function/GemmFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..d5db5cf5e7a855d89b262fe8cf42aa2c55f419f1 --- /dev/null +++ b/paddle/function/GemmFunctor.h @@ -0,0 +1,96 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/math/MathFunctions.h" + +namespace paddle { + +// TODO(hedaoyuan): Since the hl_matrix_mul interface does not conform to the +// cblas_dgemm interface's parameter format, it is necessary to introduce +// GemmFunctor as a new interface. Later, when considering the implementation +// of MatMulFunction, we need to consider the reconstruction of hl_matrix_mul +// interface. 
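// A minimal usage sketch (assuming a CPU instantiation such as
// GemmFunctor<DEVICE_TYPE_CPU, real>, mirroring how GemmConvFunction calls it
// after im2col):
//   GemmFunctor<DEVICE_TYPE_CPU, real> gemm;
//   // C[M x N] = 1.0 * A[M x K] * B[K x N] + 0.0 * C
//   gemm(CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, A, K, B, N, 0.0f, C, N);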
+template +class GemmFunctor { +public: + void operator()(const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE TransB, + const int M, + const int N, + const int K, + const T alpha, + const T* A, + const int lda, + const T* B, + const int ldb, + const T beta, + T* C, + const int ldc); +}; + +template +class GemmFunctor { +public: + void operator()(const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE TransB, + const int M, + const int N, + const int K, + const T alpha, + const T* A, + const int lda, + const T* B, + const int ldb, + const T beta, + T* C, + const int ldc) { + gemm(transA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); + } +}; + +template +class GemmFunctor { +public: + void operator()(const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE TransB, + const int M, + const int N, + const int K, + const T alpha, + const T* A, + const int lda, + const T* B, + const int ldb, + const T beta, + T* C, + const int ldc) { + hl_matrix_mul((T*)A, + transA == CblasNoTrans ? HPPL_OP_N : HPPL_OP_T, + (T*)B, + TransB == CblasNoTrans ? HPPL_OP_N : HPPL_OP_T, + C, + M, + N, + K, + alpha, + beta, + lda, + ldb, + ldc); + } +}; + +} // namespace paddle diff --git a/paddle/function/MulOpTest.cpp b/paddle/function/MulOpTest.cpp index 8753057ebf73c99336b2f5d9c610e4aaf293f845..d31eb0c74f25f5c2ef910264bdf2779e16b1a004 100644 --- a/paddle/function/MulOpTest.cpp +++ b/paddle/function/MulOpTest.cpp @@ -35,7 +35,7 @@ void testFuncDDDMatrix( size_t heightC = dimM; size_t widthC = dimN; // init Test object - FunctionCompare test( + CpuGpuFuncCompare test( "MulOp", FuncConfig().set("aTrans", transa).set("bTrans", transb)); // prepare input arguments /// matrix A : HA * WA @@ -81,8 +81,8 @@ void testFuncDSparseDMatrix( size_t dimM, size_t dimN, size_t dimK, size_t nnz, SparseFormat FORMAT) { real scaleT = 1.0; // init Test object - FunctionCompare test("MulOp", - FuncConfig().set("aTrans", false).set("bTrans", false)); + CpuGpuFuncCompare test( + "MulOp", FuncConfig().set("aTrans", false).set("bTrans", false)); // prepare input arguments /// sparse matrix A : M * K test.addInputs(SparseMatrixArg( @@ -126,8 +126,8 @@ void testFuncDDSparseMatrix( size_t dimM, size_t dimN, size_t dimK, size_t nnz, SparseFormat FORMAT) { real scaleT = 1.0; // init Test object - FunctionCompare test("MulOp", - FuncConfig().set("aTrans", false).set("bTrans", false)); + CpuGpuFuncCompare test( + "MulOp", FuncConfig().set("aTrans", false).set("bTrans", false)); // prepare input arguments /// matrix A : M * K test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{dimM, dimK})); @@ -172,8 +172,8 @@ void testFuncSparseDDMatrix( size_t dimM, size_t dimN, size_t dimK, size_t nnz, SparseFormat FORMAT) { real scaleT = 1.0; // init Test object - FunctionCompare test("MulOp", - FuncConfig().set("aTrans", false).set("bTrans", false)); + CpuGpuFuncCompare test( + "MulOp", FuncConfig().set("aTrans", false).set("bTrans", false)); // prepare input arguments /// matrix A : M * K test.addInputs(BufferArg(VALUE_TYPE_FLOAT, TensorShape{dimM, dimK})); diff --git a/paddle/function/NaiveConvOp.cpp b/paddle/function/NaiveConvOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d204f99e0e127688eeda28b46715a37c1100c4e --- /dev/null +++ b/paddle/function/NaiveConvOp.cpp @@ -0,0 +1,137 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "ConvOp.h" + +namespace paddle { + +/* + * The three arguments are stored in memory in row major order. + * inputData = [batchSize, inputChannels, inputHeight, inputWidth] + * filterData = [outputChannels, inputChannels, filterHeight, filterWidth] + * outputData = [batchSize, outputChannels, outputHeight, outputWidth] + */ +template +class NaiveConvFunctor { +public: + void operator()(const T* inputData, + size_t batchSize, + size_t inputChannels, + size_t inputHeight, + size_t inputWidth, + const T* filterData, + size_t filterHeight, + size_t filterWidth, + T* outputData, + size_t outputChannels, + size_t outputHeight, + size_t outputWidth, + size_t paddingH, + size_t paddingW, + size_t strideH, + size_t strideW) { + for (size_t batch = 0; batch < batchSize; batch++) { + for (size_t outC = 0; outC < outputChannels; outC++) { + for (size_t outH = 0; outH < outputHeight; outH++) { + for (size_t outW = 0; outW < outputWidth; outW++) { + const int inStartH = (outH * strideH) - paddingH; + const int inStartW = (outW * strideW) - paddingW; + T outValue = (T)0; + for (size_t inC = 0; inC < inputChannels; inC++) { + for (size_t fH = 0; fH < filterHeight; fH++) { + for (size_t fW = 0; fW < filterWidth; fW++) { + T inValue; + const int inH = inStartH + fH; + const int inW = inStartW + fW; + if ((inH >= 0 && inH < inputHeight) && + (inW >= 0 && inW < inputWidth)) { + size_t offsetInput = + batch * inputChannels * inputHeight * inputWidth + + inC * inputHeight * inputWidth + inH * inputWidth + inW; + inValue = inputData[offsetInput]; + } else { + inValue = (T)0; + } + size_t offsetFilter = + outC * inputChannels * filterHeight * filterWidth + + inC * filterHeight * filterWidth + fH * filterWidth + fW; + T filterValue = filterData[offsetFilter]; + outValue += (inValue * filterValue); + } + } + } + + size_t offset = + batch * outputChannels * outputHeight * outputWidth + + outC * outputHeight * outputWidth + outH * outputWidth + outW; + outputData[offset] = outValue; + } + } + } + } + } +}; + +template +class NaiveConvFunction : public ConvFunctionBase { +public: + void init(const FuncConfig& config) override { + ConvFunctionBase::init(config); + } + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(numInputs_, inputs.size()); + CHECK_EQ(numOutputs_, outputs.size()); + const TensorShape& input = inputs[0].shape(); + const TensorShape& filter = inputs[1].shape(); + const TensorShape& output = outputs[0].shape(); + check(input, filter, output); + CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO); + + size_t batchSize = inputs[0].shape()[0]; + size_t inputChannels = inputs[0].shape()[1]; + size_t inputHeight = inputs[0].shape()[2]; + size_t inputWidth = inputs[0].shape()[3]; + size_t filterHeight = inputs[1].shape()[2]; + size_t filterWidth = inputs[1].shape()[3]; + size_t outputChannels = outputs[0].shape()[1]; + size_t outputHeight = outputs[0].shape()[2]; + size_t outputWidth = outputs[0].shape()[3]; + + real* inputData = inputs[0].data(); + real* filterData = inputs[1].data(); + real* outputData = outputs[0].data(); + NaiveConvFunctor conv; + conv(inputData, + 
batchSize, + inputChannels, + inputHeight, + inputWidth, + filterData, + filterHeight, + filterWidth, + outputData, + outputChannels, + outputHeight, + outputWidth, + paddingH(), + paddingW(), + strideH(), + strideW()); + } +}; + +REGISTER_TYPED_FUNC(NaiveConv, CPU, NaiveConvFunction); + +} // namespace paddle diff --git a/paddle/function/PadOpTest.cpp b/paddle/function/PadOpTest.cpp index f77ac2a8c49c83f2d6c64c2a30b6a2f2eb09ac10..e286f4e5b8a42348b9d23fd4c3ad44194ca1f299 100644 --- a/paddle/function/PadOpTest.cpp +++ b/paddle/function/PadOpTest.cpp @@ -25,7 +25,7 @@ TEST(Pad, real) { VLOG(3) << " numSamples=" << numSamples << " channels=" << channels << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; for (bool test_grad : {false, true}) { - FunctionCompare compare( + CpuGpuFuncCompare compare( test_grad ? "PadGrad" : "Pad", FuncConfig() .set>("channel", {2, 3}) diff --git a/paddle/function/RowConvOpTest.cpp b/paddle/function/RowConvOpTest.cpp index 1c95d3ff2cccbf33f4c5f91f6daf340871a8f7b0..f52d18b0491ec444e2fe89fe8fb5c1baa128823e 100644 --- a/paddle/function/RowConvOpTest.cpp +++ b/paddle/function/RowConvOpTest.cpp @@ -18,7 +18,7 @@ limitations under the License. */ namespace paddle { void testRowConvFw(size_t batchSize, size_t dim, size_t contextLength) { - FunctionCompare test("RowConv", FuncConfig()); + CpuGpuFuncCompare test("RowConv", FuncConfig()); test.addSequence(SequenceIdArg(TensorShape{batchSize})); test.addInputs(SequenceArg(VALUE_TYPE_FLOAT, TensorShape{batchSize, dim})); @@ -31,7 +31,7 @@ void testRowConvFw(size_t batchSize, size_t dim, size_t contextLength) { } void testRowConvBw(size_t batchSize, size_t dim, size_t contextLength) { - FunctionCompare test("RowConvGrad", FuncConfig()); + CpuGpuFuncCompare test("RowConvGrad", FuncConfig()); test.addSequence(SequenceIdArg(TensorShape{batchSize})); test.addInputs(SequenceArg(VALUE_TYPE_FLOAT, TensorShape{batchSize, dim})); diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 7b234dc2a6663dc677affcae7dc6306c104c1250..e161d89c38a290000a2cbdb2905e56901ae4c144 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -118,11 +118,7 @@ size_t ConvBaseLayer::calOutputSize() { layerSize = outH[0] * outW[0] * size_t(numFilters_); }; - if (isDeconv_) { - setLayerSize(outputH_, outputW_, imgSizeH_, imgSizeW_); - } else { - setLayerSize(imgSizeH_, imgSizeW_, outputH_, outputW_); - } + setLayerSize(imgSizeH_, imgSizeW_, outputH_, outputW_); return layerSize; } diff --git a/paddle/gserver/layers/CudnnConvBaseLayer.cpp b/paddle/gserver/layers/CudnnConvBaseLayer.cpp index 24363bb8b09cc354c25abe512257be68566c10e1..c056bbe4d1d354751d4f85f8d0743cf30486c087 100644 --- a/paddle/gserver/layers/CudnnConvBaseLayer.cpp +++ b/paddle/gserver/layers/CudnnConvBaseLayer.cpp @@ -70,14 +70,8 @@ void CudnnConvBaseLayer::forward(PassType passType) { if (biases_) { REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str()); int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); - int outH, outW; - if (isDeconv_) { - outH = imgSizeH_[0]; - outW = imgSizeW_[0]; - } else { - outH = outputH_[0]; - outW = outputW_[0]; - } + int outH = outputH_[0]; + int outW = outputW_[0]; hl_tensor_reshape(outputDesc_, batchSize, diff --git a/paddle/gserver/layers/DetectionUtil.cpp b/paddle/gserver/layers/DetectionUtil.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3e61adc66e60c54250e4f323452aa13045310879 --- /dev/null +++ 
b/paddle/gserver/layers/DetectionUtil.cpp @@ -0,0 +1,576 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "DetectionUtil.h" + +namespace paddle { + +size_t appendWithPermute(const Matrix& inMatrix, + size_t height, + size_t width, + size_t outTotalSize, + size_t outOffset, + size_t batchSize, + Matrix& outMatrix, + PermMode permMode) { + CHECK_EQ(inMatrix.useGpu(), outMatrix.useGpu()); + bool useGpu = inMatrix.useGpu(); + if (permMode == kNCHWToNHWC) { + size_t inElementCnt = inMatrix.getElementCnt(); + size_t channels = inElementCnt / (height * width * batchSize); + size_t imgSize = height * width; + for (size_t i = 0; i < batchSize; ++i) { + size_t offset = i * (outTotalSize / batchSize) + outOffset; + const MatrixPtr inTmp = Matrix::create( + const_cast(inMatrix.getData()) + i * channels * imgSize, + channels, + imgSize, + false, + useGpu); + MatrixPtr outTmp = + Matrix::create(const_cast(outMatrix.getData()) + offset, + imgSize, + channels, + false, + useGpu); + inTmp->transpose(outTmp, false); + } + return channels * imgSize; + } else { + LOG(FATAL) << "Unkown permute mode"; + } +} + +size_t decomposeWithPermute(const Matrix& inMatrix, + size_t height, + size_t width, + size_t inTotalSize, + size_t inOffset, + size_t batchSize, + Matrix& outMatrix, + PermMode permMode) { + CHECK_EQ(inMatrix.useGpu(), outMatrix.useGpu()); + bool useGpu = inMatrix.useGpu(); + if (permMode == kNHWCToNCHW) { + size_t outElementCnt = outMatrix.getElementCnt(); + size_t channels = outElementCnt / (height * width * batchSize); + size_t imgSize = height * width; + for (size_t i = 0; i < batchSize; ++i) { + size_t offset = i * (inTotalSize / batchSize) + inOffset; + const MatrixPtr inTmp = + Matrix::create(const_cast(inMatrix.getData()) + offset, + imgSize, + channels, + false, + useGpu); + MatrixPtr outTmp = Matrix::create( + const_cast(outMatrix.getData()) + i * channels * imgSize, + channels, + imgSize, + false, + useGpu); + inTmp->transpose(outTmp, false); + } + return channels * imgSize; + } else { + LOG(FATAL) << "Unkown permute mode"; + } +} + +real jaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) { + if (bbox2.xMin > bbox1.xMax || bbox2.xMax < bbox1.xMin || + bbox2.yMin > bbox1.yMax || bbox2.yMax < bbox1.yMin) { + return 0.0; + } else { + real interXMin = std::max(bbox1.xMin, bbox2.xMin); + real interYMin = std::max(bbox1.yMin, bbox2.yMin); + real interXMax = std::min(bbox1.xMax, bbox2.xMax); + real interYMax = std::min(bbox1.yMax, bbox2.yMax); + + real interWidth = interXMax - interXMin; + real interHeight = interYMax - interYMin; + real interArea = interWidth * interHeight; + + real bboxArea1 = bbox1.getArea(); + real bboxArea2 = bbox2.getArea(); + + return interArea / (bboxArea1 + bboxArea2 - interArea); + } +} + +void encodeBBoxWithVar(const NormalizedBBox& priorBBox, + const vector& priorBBoxVar, + const NormalizedBBox& gtBBox, + vector& outVec) { + real priorBBoxWidth = priorBBox.getWidth(); + real priorBBoxHeight = 
priorBBox.getHeight(); + real priorBBoxCenterX = priorBBox.getCenterX(); + real priorBBoxCenterY = priorBBox.getCenterY(); + + real gtBBoxWidth = gtBBox.getWidth(); + real gtBBoxHeight = gtBBox.getHeight(); + real gtBBoxCenterX = gtBBox.getCenterX(); + real gtBBoxCenterY = gtBBox.getCenterY(); + + outVec.clear(); + outVec.push_back((gtBBoxCenterX - priorBBoxCenterX) / priorBBoxWidth / + priorBBoxVar[0]); + outVec.push_back((gtBBoxCenterY - priorBBoxCenterY) / priorBBoxHeight / + priorBBoxVar[1]); + outVec.push_back(std::log(std::fabs(gtBBoxWidth / priorBBoxWidth)) / + priorBBoxVar[2]); + outVec.push_back(std::log(std::fabs(gtBBoxHeight / priorBBoxHeight)) / + priorBBoxVar[3]); +} + +NormalizedBBox decodeBBoxWithVar(const NormalizedBBox& priorBBox, + const vector& priorBBoxVar, + const vector& locPredData) { + real priorBBoxWidth = priorBBox.getWidth(); + real priorBBoxHeight = priorBBox.getHeight(); + real priorBBoxCenterX = priorBBox.getCenterX(); + real priorBBoxCenterY = priorBBox.getCenterY(); + + real decodedBBoxCenterX = + priorBBoxVar[0] * locPredData[0] * priorBBoxWidth + priorBBoxCenterX; + real decodedBBoxCenterY = + priorBBoxVar[1] * locPredData[1] * priorBBoxHeight + priorBBoxCenterY; + real decodedBBoxWidth = + std::exp(priorBBoxVar[2] * locPredData[2]) * priorBBoxWidth; + real decodedBBoxHeight = + std::exp(priorBBoxVar[3] * locPredData[3]) * priorBBoxHeight; + + NormalizedBBox decodedBBox; + decodedBBox.xMin = decodedBBoxCenterX - decodedBBoxWidth / 2; + decodedBBox.yMin = decodedBBoxCenterY - decodedBBoxHeight / 2; + decodedBBox.xMax = decodedBBoxCenterX + decodedBBoxWidth / 2; + decodedBBox.yMax = decodedBBoxCenterY + decodedBBoxHeight / 2; + + return decodedBBox; +} + +void getBBoxFromPriorData(const real* priorData, + const size_t numBBoxes, + vector& bboxVec) { + size_t outOffset = bboxVec.size(); + bboxVec.resize(bboxVec.size() + numBBoxes); + for (size_t i = 0; i < numBBoxes; ++i) { + NormalizedBBox bbox; + bbox.xMin = *(priorData + i * 8); + bbox.yMin = *(priorData + i * 8 + 1); + bbox.xMax = *(priorData + i * 8 + 2); + bbox.yMax = *(priorData + i * 8 + 3); + bboxVec[outOffset + i] = bbox; + } +} + +void getBBoxVarFromPriorData(const real* priorData, + const size_t num, + vector>& varVec) { + size_t outOffset = varVec.size(); + varVec.resize(varVec.size() + num); + for (size_t i = 0; i < num; ++i) { + vector var; + var.push_back(*(priorData + i * 8 + 4)); + var.push_back(*(priorData + i * 8 + 5)); + var.push_back(*(priorData + i * 8 + 6)); + var.push_back(*(priorData + i * 8 + 7)); + varVec[outOffset + i] = var; + } +} + +void getBBoxFromLabelData(const real* labelData, + const size_t numBBoxes, + vector& bboxVec) { + size_t outOffset = bboxVec.size(); + bboxVec.resize(bboxVec.size() + numBBoxes); + for (size_t i = 0; i < numBBoxes; ++i) { + NormalizedBBox bbox; + bbox.xMin = *(labelData + i * 6 + 1); + bbox.yMin = *(labelData + i * 6 + 2); + bbox.xMax = *(labelData + i * 6 + 3); + bbox.yMax = *(labelData + i * 6 + 4); + real isDifficult = *(labelData + i * 6 + 5); + if (std::abs(isDifficult - 0.0) < 1e-6) + bbox.isDifficult = false; + else + bbox.isDifficult = true; + bboxVec[outOffset + i] = bbox; + } +} + +void getBBoxFromDetectData(const real* detectData, + const size_t numBBoxes, + vector& labelVec, + vector& scoreVec, + vector& bboxVec) { + size_t outOffset = bboxVec.size(); + labelVec.resize(outOffset + numBBoxes); + scoreVec.resize(outOffset + numBBoxes); + bboxVec.resize(outOffset + numBBoxes); + for (size_t i = 0; i < numBBoxes; ++i) { + labelVec[outOffset 
+ i] = *(detectData + i * 7 + 1); + scoreVec[outOffset + i] = *(detectData + i * 7 + 2); + NormalizedBBox bbox; + bbox.xMin = *(detectData + i * 7 + 3); + bbox.yMin = *(detectData + i * 7 + 4); + bbox.xMax = *(detectData + i * 7 + 5); + bbox.yMax = *(detectData + i * 7 + 6); + bboxVec[outOffset + i] = bbox; + } +} + +void matchBBox(const vector& priorBBoxes, + const vector& gtBBoxes, + real overlapThreshold, + vector* matchIndices, + vector* matchOverlaps) { + map> overlaps; + size_t numPriors = priorBBoxes.size(); + size_t numGTs = gtBBoxes.size(); + + matchIndices->clear(); + matchIndices->resize(numPriors, -1); + matchOverlaps->clear(); + matchOverlaps->resize(numPriors, 0.0); + + // Store the positive overlap between predictions and ground truth + for (size_t i = 0; i < numPriors; ++i) { + for (size_t j = 0; j < numGTs; ++j) { + real overlap = jaccardOverlap(priorBBoxes[i], gtBBoxes[j]); + if (overlap > 1e-6) { + (*matchOverlaps)[i] = std::max((*matchOverlaps)[i], overlap); + overlaps[i][j] = overlap; + } + } + } + // Bipartite matching + vector gtPool; + for (size_t i = 0; i < numGTs; ++i) { + gtPool.push_back(i); + } + while (gtPool.size() > 0) { + // Find the most overlapped gt and corresponding predictions + int maxPriorIdx = -1; + int maxGTIdx = -1; + real maxOverlap = -1.0; + for (map>::iterator it = overlaps.begin(); + it != overlaps.end(); + ++it) { + size_t i = it->first; + if ((*matchIndices)[i] != -1) { + // The prediction already has matched ground truth or is ignored + continue; + } + for (size_t p = 0; p < gtPool.size(); ++p) { + int j = gtPool[p]; + if (it->second.find(j) == it->second.end()) { + // No overlap between the i-th prediction and j-th ground truth + continue; + } + // Find the maximum overlapped pair + if (it->second[j] > maxOverlap) { + maxPriorIdx = (int)i; + maxGTIdx = (int)j; + maxOverlap = it->second[j]; + } + } + } + if (maxPriorIdx == -1) { + break; + } else { + (*matchIndices)[maxPriorIdx] = maxGTIdx; + (*matchOverlaps)[maxPriorIdx] = maxOverlap; + gtPool.erase(std::find(gtPool.begin(), gtPool.end(), maxGTIdx)); + } + } + + // Get most overlaped for the rest prediction bboxes + for (map>::iterator it = overlaps.begin(); + it != overlaps.end(); + ++it) { + size_t i = it->first; + if ((*matchIndices)[i] != -1) { + // The prediction already has matched ground truth or is ignored + continue; + } + int maxGTIdx = -1; + real maxOverlap = -1; + for (size_t j = 0; j < numGTs; ++j) { + if (it->second.find(j) == it->second.end()) { + // No overlap between the i-th prediction and j-th ground truth + continue; + } + // Find the maximum overlapped pair + real overlap = it->second[j]; + if (overlap > maxOverlap && overlap >= overlapThreshold) { + maxGTIdx = j; + maxOverlap = overlap; + } + } + if (maxGTIdx != -1) { + (*matchIndices)[i] = maxGTIdx; + (*matchOverlaps)[i] = maxOverlap; + } + } +} + +pair generateMatchIndices( + const Matrix& priorValue, + const size_t numPriorBBoxes, + const Matrix& gtValue, + const int* gtStartPosPtr, + const size_t seqNum, + const vector>& maxConfScore, + const size_t batchSize, + const real overlapThreshold, + const real negOverlapThreshold, + const size_t negPosRatio, + vector>* matchIndicesVecPtr, + vector>* negIndicesVecPtr) { + vector priorBBoxes; // share same prior bboxes + getBBoxFromPriorData(priorValue.getData(), numPriorBBoxes, priorBBoxes); + size_t totalPos = 0; + size_t totalNeg = 0; + for (size_t n = 0; n < batchSize; ++n) { + vector matchIndices; + vector negIndices; + vector matchOverlaps; + 
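// Per sample: decode this sequence's ground-truth boxes, match them against
// the shared prior boxes (bipartite matching first, then threshold matching
// for the remaining priors), and finally mine hard negatives ranked by
// confidence so that at most negPosRatio negatives are kept per positive.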
matchIndices.resize(numPriorBBoxes, -1); + matchOverlaps.resize(numPriorBBoxes, 0.0); + size_t numGTBBoxes = 0; + if (n < seqNum) numGTBBoxes = gtStartPosPtr[n + 1] - gtStartPosPtr[n]; + if (!numGTBBoxes) { + matchIndicesVecPtr->push_back(matchIndices); + negIndicesVecPtr->push_back(negIndices); + continue; + } + vector gtBBoxes; + getBBoxFromLabelData( + gtValue.getData() + gtStartPosPtr[n] * 6, numGTBBoxes, gtBBoxes); + + matchBBox( + priorBBoxes, gtBBoxes, overlapThreshold, &matchIndices, &matchOverlaps); + + size_t numPos = 0; + size_t numNeg = 0; + for (size_t i = 0; i < matchIndices.size(); ++i) + if (matchIndices[i] != -1) ++numPos; + totalPos += numPos; + vector> scoresIndices; + for (size_t i = 0; i < matchIndices.size(); ++i) + if (matchIndices[i] == -1 && matchOverlaps[i] < negOverlapThreshold) { + scoresIndices.push_back(std::make_pair(maxConfScore[n][i], i)); + ++numNeg; + } + numNeg = std::min(static_cast(numPos * negPosRatio), numNeg); + std::sort(scoresIndices.begin(), + scoresIndices.end(), + sortScorePairDescend); + for (size_t i = 0; i < numNeg; ++i) + negIndices.push_back(scoresIndices[i].second); + totalNeg += numNeg; + matchIndicesVecPtr->push_back(matchIndices); + negIndicesVecPtr->push_back(negIndices); + } + return std::make_pair(totalPos, totalNeg); +} + +void getMaxConfidenceScores(const real* confData, + const size_t batchSize, + const size_t numPriorBBoxes, + const size_t numClasses, + const size_t backgroundId, + vector>* maxConfScoreVecPtr) { + maxConfScoreVecPtr->clear(); + for (size_t i = 0; i < batchSize; ++i) { + vector maxConfScore; + for (size_t j = 0; j < numPriorBBoxes; ++j) { + int offset = j * numClasses; + real maxVal = -FLT_MAX; + real maxPosVal = -FLT_MAX; + real maxScore = 0.0; + for (size_t c = 0; c < numClasses; ++c) { + maxVal = std::max(confData[offset + c], maxVal); + if (c != backgroundId) + maxPosVal = std::max(confData[offset + c], maxPosVal); + } + real sum = 0.0; + for (size_t c = 0; c < numClasses; ++c) + sum += std::exp(confData[offset + c] - maxVal); + maxScore = std::exp(maxPosVal - maxVal) / sum; + maxConfScore.push_back(maxScore); + } + confData += numPriorBBoxes * numClasses; + maxConfScoreVecPtr->push_back(maxConfScore); + } +} + +template +bool sortScorePairDescend(const pair& pair1, + const pair& pair2) { + return pair1.first > pair2.first; +} + +template <> +bool sortScorePairDescend(const pair& pair1, + const pair& pair2) { + return pair1.first > pair2.first; +} + +void applyNMSFast(const vector& bboxes, + const real* confScoreData, + size_t classIdx, + size_t topK, + real confThreshold, + real nmsThreshold, + size_t numPriorBBoxes, + size_t numClasses, + vector* indices) { + vector> scores; + for (size_t i = 0; i < numPriorBBoxes; ++i) { + size_t confOffset = i * numClasses + classIdx; + if (confScoreData[confOffset] > confThreshold) + scores.push_back(std::make_pair(confScoreData[confOffset], i)); + } + std::stable_sort(scores.begin(), scores.end(), sortScorePairDescend); + if (topK > 0 && topK < scores.size()) scores.resize(topK); + while (scores.size() > 0) { + const size_t idx = scores.front().second; + bool keep = true; + for (size_t i = 0; i < indices->size(); ++i) { + if (keep) { + const size_t savedIdx = (*indices)[i]; + real overlap = jaccardOverlap(bboxes[idx], bboxes[savedIdx]); + keep = overlap <= nmsThreshold; + } else { + break; + } + } + if (keep) indices->push_back(idx); + scores.erase(scores.begin()); + } +} + +size_t getDetectionIndices( + const real* confData, + const size_t numPriorBBoxes, + const 
size_t numClasses, + const size_t backgroundId, + const size_t batchSize, + const size_t confThreshold, + const size_t nmsTopK, + const real nmsThreshold, + const size_t keepTopK, + const vector>& allDecodedBBoxes, + vector>>* allDetectionIndices) { + size_t totalKeepNum = 0; + for (size_t n = 0; n < batchSize; ++n) { + const vector& decodedBBoxes = allDecodedBBoxes[n]; + size_t numDetected = 0; + map> indices; + size_t confOffset = n * numPriorBBoxes * numClasses; + for (size_t c = 0; c < numClasses; ++c) { + if (c == backgroundId) continue; + applyNMSFast(decodedBBoxes, + confData + confOffset, + c, + nmsTopK, + confThreshold, + nmsThreshold, + numPriorBBoxes, + numClasses, + &(indices[c])); + numDetected += indices[c].size(); + } + if (keepTopK > 0 && numDetected > keepTopK) { + vector>> scoreIndexPairs; + for (size_t c = 0; c < numClasses; ++c) { + const vector& labelIndices = indices[c]; + for (size_t i = 0; i < labelIndices.size(); ++i) { + size_t idx = labelIndices[i]; + scoreIndexPairs.push_back( + std::make_pair((confData + confOffset)[idx * numClasses + c], + std::make_pair(c, idx))); + } + } + std::sort(scoreIndexPairs.begin(), + scoreIndexPairs.end(), + sortScorePairDescend>); + scoreIndexPairs.resize(keepTopK); + map> newIndices; + for (size_t i = 0; i < scoreIndexPairs.size(); ++i) { + size_t label = scoreIndexPairs[i].second.first; + size_t idx = scoreIndexPairs[i].second.second; + newIndices[label].push_back(idx); + } + allDetectionIndices->push_back(newIndices); + totalKeepNum += keepTopK; + } else { + allDetectionIndices->push_back(indices); + totalKeepNum += numDetected; + } + } + return totalKeepNum; +} + +void getDetectionOutput(const real* confData, + const size_t numKept, + const size_t numPriorBBoxes, + const size_t numClasses, + const size_t batchSize, + const vector>>& allIndices, + const vector>& allDecodedBBoxes, + Matrix& out) { + MatrixPtr outBuffer; + Matrix::resizeOrCreate(outBuffer, numKept, 7, false, false); + real* bufferData = outBuffer->getData(); + size_t count = 0; + for (size_t n = 0; n < batchSize; ++n) { + for (map>::const_iterator it = allIndices[n].begin(); + it != allIndices[n].end(); + ++it) { + size_t label = it->first; + const vector& indices = it->second; + const vector& decodedBBoxes = allDecodedBBoxes[n]; + for (size_t i = 0; i < indices.size(); ++i) { + size_t idx = indices[i]; + size_t confOffset = n * numPriorBBoxes * numClasses + idx * numClasses; + bufferData[count * 7] = n; + bufferData[count * 7 + 1] = label; + bufferData[count * 7 + 2] = (confData + confOffset)[label]; + NormalizedBBox clippedBBox = clipBBox(decodedBBoxes[idx]); + bufferData[count * 7 + 3] = clippedBBox.xMin; + bufferData[count * 7 + 4] = clippedBBox.yMin; + bufferData[count * 7 + 5] = clippedBBox.xMax; + bufferData[count * 7 + 6] = clippedBBox.yMax; + ++count; + } + } + } + out.copyFrom(bufferData, numKept * 7); +} + +NormalizedBBox clipBBox(const NormalizedBBox& bbox) { + real realOne = static_cast(1.0); + real realZero = static_cast(0.0); + NormalizedBBox clippedBBox; + clippedBBox.xMin = std::max(std::min(bbox.xMin, realOne), realZero); + clippedBBox.yMin = std::max(std::min(bbox.yMin, realOne), realZero); + clippedBBox.xMax = std::max(std::min(bbox.xMax, realOne), realZero); + clippedBBox.yMax = std::max(std::min(bbox.yMax, realOne), realZero); + return clippedBBox; +} + +} // namespace paddle diff --git a/paddle/gserver/layers/DetectionUtil.h b/paddle/gserver/layers/DetectionUtil.h new file mode 100644 index 
0000000000000000000000000000000000000000..fe4f9f075e4cf011c97f68f49598a828d62327b3 --- /dev/null +++ b/paddle/gserver/layers/DetectionUtil.h @@ -0,0 +1,307 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include +#include "paddle/math/Matrix.h" + +using std::vector; +using std::pair; +using std::map; + +namespace paddle { + +template +struct BBoxBase { + BBoxBase(T xMin, T yMin, T xMax, T yMax) + : xMin(xMin), yMin(yMin), xMax(xMax), yMax(yMax), isDifficult(false) {} + + BBoxBase() {} + + T getWidth() const { return xMax - xMin; } + + T getHeight() const { return yMax - yMin; } + + T getCenterX() const { return (xMin + xMax) / 2; } + + T getCenterY() const { return (yMin + yMax) / 2; } + + T getArea() const { return getWidth() * getHeight(); } + + // coordinate of bounding box + T xMin; + T yMin; + T xMax; + T yMax; + // whether difficult object (e.g. object with heavy occlusion is difficult) + bool isDifficult; +}; + +struct NormalizedBBox : BBoxBase { + NormalizedBBox() : BBoxBase() {} +}; + +enum PermMode { kNCHWToNHWC, kNHWCToNCHW }; + +/** + * @brief First permute input maxtrix then append to output matrix + */ +size_t appendWithPermute(const Matrix& inMatrix, + size_t height, + size_t width, + size_t outTotalSize, + size_t outOffset, + size_t batchSize, + Matrix& outMatrix, + PermMode permMode); + +/** + * @brief First permute input maxtrix then decompose to output + */ +size_t decomposeWithPermute(const Matrix& inMatrix, + size_t height, + size_t width, + size_t totalSize, + size_t offset, + size_t batchSize, + Matrix& outMatrix, + PermMode permMode); + +/** + * @brief Compute jaccard overlap between two bboxes. + * @param bbox1 The first bbox + * @param bbox2 The second bbox + */ +real jaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2); + +/** + * @brief Compute offset parameters between prior bbox and ground truth bbox + * and variances of prior bbox are considered + * @param priorBBox Input prior bbox + * @param priorBBoxVar Variance parameters of prior bbox + * @param gtBBox Groundtruth bbox + * @param outVec Output vector + */ +void encodeBBoxWithVar(const NormalizedBBox& priorBBox, + const vector& priorBBoxVar, + const NormalizedBBox& gtBBox, + vector& outVec); + +/** + * @brief Decode prior bbox with offset parameters + * and variances of prior bbox are considered + * @param priorBBox Prior bbox to be decoded + * @param priorBBoxVar Variance parameters of prior bbox + * @param locPredData Offset parameters + */ +NormalizedBBox decodeBBoxWithVar(const NormalizedBBox& priorBBox, + const vector& priorBBoxVar, + const vector& locPredData); + +/** + * @brief Extract bboxes from prior matrix, the layout is + * xmin1 | ymin1 | xmax1 | ymax1 | xmin1Var | ymin1Var | xmax1Var | ymax1Var ... 
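 * i.e. each prior box occupies 8 consecutive values: its 4 corner
 * coordinates followed by its 4 variances (see getBBoxVarFromPriorData).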
+ * @param priorData Matrix of prior value + * @param numBBoxes Number of bbox to be extracted + * @param bboxVec Append to the vector + */ +void getBBoxFromPriorData(const real* priorData, + const size_t numBBoxes, + vector& bboxVec); + +/** + * @brief Extract labels, scores and bboxes from detection matrix, the layout is + * imageId | label | score | xmin | ymin | xmax | ymax + * @param detectData Matrix of detection value + * @param numBBoxes Number of bbox to be extracted + * @param labelVec Label of bbox + * @param scoreVec Score of bbox + * @param bboxVec Append to the vector + */ +void getBBoxFromDetectData(const real* detectData, + const size_t numBBoxes, + vector& labelVec, + vector& scoreVec, + vector& bboxVec); + +/** + * @brief Extract variances from prior matrix, the layout is + * xmin1 | ymin1 | xmax1 | ymax1 | xmin1Var | ymin1Var | xmax1Var | ymax1Var ... + * @param priorData Matrix of prior value + * @param num Number to be extracted + * @param varVec Append to the vector + */ +void getBBoxVarFromPriorData(const real* priorData, + const size_t num, + vector>& varVec); + +/** + * @brief Extract bboxes from label matrix, the layout is + * class1_1 | xmin1_1 | ymin1_1 | xmax1_1 | ymax1_1 | difficult1_1 | ... + * @param labelData Matrix of label value + * @param numBBoxes Number to be extracted + * @param bboxVec Append to the vector + */ +void getBBoxFromLabelData(const real* labelData, + const size_t numBBoxes, + vector& bboxVec); + +/** +* @brief Match prior bbox to groundtruth bbox, the strategy is: +1. Find the most overlaped bbox pair (prior and groundtruth) +2. For rest of prior bboxes find the most overlaped groundtruth bbox +* @param priorBBoxes prior bbox +* @param gtBBoxes groundtruth bbox +* @param overlapThreshold Low boundary of overlap (judge whether matched) +* @param matchIndices For each prior bbox, groundtruth bbox index if matched +otherwise -1 +* @param matchOverlaps For each prior bbox, overap with all groundtruth bboxes +*/ +void matchBBox(const vector& priorBBoxes, + const vector& gtBBoxes, + real overlapThreshold, + vector* matchIndices, + vector* matchOverlaps); + +/** +* @brief Generate positive bboxes and negative bboxes, +|positive bboxes|/|negative bboxes| is negPosRatio +* @param priorValue Prior value +* @param numPriorBBoxes Number of prior bbox +* @param gtValue Groundtruth value +* @param gtStartPosPtr Since groundtruth value stored as sequence type, +this parameter indicates start position of each record +* @param seqNum Number of sequence +* @param maxConfScore Classification score for prior bbox, used to mine +negative examples +* @param batchSize Image number +* @param overlapThreshold Low boundary of overap +* @param negOverlapThreshold Upper boundary of overap (judge negative example) +* @param negPosRatio Control number of negative bboxes +* @param matchIndicesVecPtr Save indices of matched prior bbox +* @param negIndicesVecPtr Save indices of negative prior bbox +*/ +pair generateMatchIndices( + const Matrix& priorValue, + const size_t numPriorBBoxes, + const Matrix& gtValue, + const int* gtStartPosPtr, + const size_t seqNum, + const vector>& maxConfScore, + const size_t batchSize, + const real overlapThreshold, + const real negOverlapThreshold, + const size_t negPosRatio, + vector>* matchIndicesVecPtr, + vector>* negIndicesVecPtr); + +/** + * @brief Get max confidence score for each prior bbox + * @param confData Confidence scores, layout is + * class1 score | class2 score | ... | classN score ... 
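 * For every prior box the class scores are converted to softmax
 * probabilities and the largest non-background probability is kept; these
 * scores are later used for hard negative mining.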
+ * @param batchSize Image number + * @param numPriorBBoxes Prior bbox number + * @param numClasses Classes number + * @param backgroundId Background id + * @param maxConfScoreVecPtr Ouput + */ +void getMaxConfidenceScores(const real* confData, + const size_t batchSize, + const size_t numPriorBBoxes, + const size_t numClasses, + const size_t backgroundId, + vector>* maxConfScoreVecPtr); + +template +bool sortScorePairDescend(const pair& pair1, + const pair& pair2); + +template <> +bool sortScorePairDescend(const pair& pair1, + const pair& pair2); + +/** + * @brief Do NMS for bboxes to remove duplicated bboxes + * @param bboxes BBoxes to apply NMS + * @param confScoreData Confidence scores + * @param classIdx Class to do NMS + * @param topK Number to keep + * @param confThreshold Low boundary of confidence score + * @param nmsThreshold Threshold of overlap + * @param numPriorBBoxes Total number of prior bboxes + * @param numClasses Total class number + * @param indices Indices of high quality bboxes + */ +void applyNMSFast(const vector& bboxes, + const real* confScoreData, + size_t classIdx, + size_t topK, + real confThreshold, + real nmsThreshold, + size_t numPriorBBoxes, + size_t numClasses, + vector* indices); + +/** + * @brief Get detection results which satify requirements + * @param numPriorBBoxes Prior bbox number + * @param numClasses Class number + * @param backgroundId Background class + * @param batchSize Image number + * @param confThreshold Threshold of class confidence + * @param nmsTopK Used in NMS operation to keep top k bbox + * @param nmsThreshold Used in NMS, threshold of overlap + * @param keepTopK How many bboxes keeped in an image + * @param allDecodedBBoxes Decoded bboxes for all images + * @param allDetectionIndices Save detection bbox indices + */ +size_t getDetectionIndices( + const real* confData, + const size_t numPriorBBoxes, + const size_t numClasses, + const size_t backgroundId, + const size_t batchSize, + const size_t confThreshold, + const size_t nmsTopK, + const real nmsThreshold, + const size_t keepTopK, + const vector>& allDecodedBBoxes, + vector>>* allDetectionIndices); + +/** + * @brief Get detection results + * @param confData Confidence scores + * @param numPriorBBoxes Prior bbox number + * @param numClasses Class number + * @param batchSize Image number + * @param allIndices Indices of predicted bboxes + * @param allDecodedBBoxes BBoxes decoded + * @param out Output matrix + * image number | label | confidence score | xMin | yMin | xMax | yMax + */ +void getDetectionOutput(const real* confData, + const size_t numKept, + const size_t numPriorBBoxes, + const size_t numClasses, + const size_t batchSize, + const vector>>& allIndices, + const vector>& allDecodedBBoxes, + Matrix& out); + +NormalizedBBox clipBBox(const NormalizedBBox& bbox); + +} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp index fdcf994cdb47f2409b045a1337332e2f4c304fbc..77736e78f9349c0393e1e53ac700817a70893e53 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp @@ -22,26 +22,8 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap, /* Initialize the basic convolutional parent class */ ConvBaseLayer::init(layerMap, parameterMap); - /* The class fields channels_ and numFilters_ are the same as in the config - * i.e., channels_ is the for the input and numFilters_ is for the output - * - * But in order for the variables in convTrans having the same 
semantic - * meaning as in conv, we need to swap channels_ and numFilters here for - * convTrans, and in other functions too. - * */ - - /* Initialize the projection */ for (auto &inputConfig : config_.inputs()) { const ConvConfig &conf = inputConfig.conv_conf(); - int numFilters = isDeconv_ ? conf.channels() : numFilters_; - subM_.push_back(numFilters / conf.groups()); - subN_.push_back(conf.output_x() * - (conf.has_output_y() ? conf.output_y() : conf.output_x())); - int channel = isDeconv_ ? numFilters_ : conf.channels(); - subK_.push_back( - channel * conf.filter_size() * - (conf.has_filter_size_y() ? conf.filter_size_y() : conf.filter_size()) / - conf.groups()); /* Consistent caffe mode for multiple input */ caffeMode_ = conf.caffe_mode(); } @@ -54,17 +36,9 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap, size_t ExpandConvBaseLayer::getOutputSize() { CHECK_NE(inputLayers_.size(), 0UL); size_t layerSize = ConvBaseLayer::calOutputSize(); - subN_.clear(); - for (size_t i = 0; i < inputLayers_.size(); i++) { - subN_.push_back(outputH_[i] * outputW_[i]); - } return layerSize; } -void ExpandConvBaseLayer::resetExpandInput(size_t height, size_t width) { - Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); -} - void ExpandConvBaseLayer::addSharedBias() { size_t mapW = getOutputSize() / numFilters_; size_t mapH = getOutputValue()->getElementCnt() / mapW; @@ -101,173 +75,6 @@ void ExpandConvBaseLayer::addUnsharedBias() { outValue->addBias(*bias, 1.0f); } -void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image, - size_t startIdx, - int inIdx) { - int channel = isDeconv_ ? numFilters_ : channels_[inIdx]; - - resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); - - CHECK_EQ(image->getWidth(), - static_cast(imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel)); - - real *imgData = image->getData() + startIdx * image->getWidth(); - MatrixPtr imageTmp = - Matrix::create(imgData, - 1, - imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel, - false, - useGpu_); - expandInput_->convExpand(*imageTmp, - imgSizeH_[inIdx], - imgSizeW_[inIdx], - channel, - filterSizeY_[inIdx], - filterSize_[inIdx], - strideY_[inIdx], - stride_[inIdx], - paddingY_[inIdx], - padding_[inIdx], - outputH_[inIdx], - outputW_[inIdx]); - imageTmp->clear(); -} - -void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, - MatrixPtr out, - int inIdx, - int startIdx) { - int subM = subM_[inIdx]; - int subN = subN_[inIdx]; - int subK = subK_[inIdx]; - - expandOneFrame(image, startIdx, inIdx); - - int numFilters = isDeconv_ ? channels_[inIdx] : numFilters_; - - real *outData = out->getData() + startIdx * subN * numFilters; - - real *wgtData = weights_[inIdx]->getW()->getData(); - real *expInData = expandInput_->getData(); - for (int g = 0; g < groups_[inIdx]; ++g) { - MatrixPtr A = - Matrix::create(wgtData, subM, subK, false, useGpu_); // mark transpose - MatrixPtr B = Matrix::create(expInData, subK, subN, false, useGpu_); - MatrixPtr C = Matrix::create(outData, subM, subN, false, useGpu_); - C->mul(*A, *B, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - wgtData += subK * subM; - expInData += subK * subN; - outData += subM * subN; - } -} - -void ExpandConvBaseLayer::bpropActs(MatrixPtr out, - MatrixPtr image, - int inpIdx) { - int channel = isDeconv_ ? 
numFilters_ : channels_[inpIdx]; - - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - size_t batchSize = image->getHeight(); - - /* reset the expand-grad memory */ - resetExpandInput(subK * groups_[inpIdx], subN); - - real *localGradData = out->getData(); - real *tgtGradData = image->getData(); - for (size_t n = 0; n < batchSize; n++) { - real *wgtData = weights_[inpIdx]->getW()->getData(); - real *expandInData = expandInput_->getData(); - - for (int g = 0; g < groups_[inpIdx]; g++) { - // create temporary matrix - MatrixPtr C = Matrix::create(expandInData, subK, subN, false, useGpu_); - MatrixPtr B = Matrix::create(localGradData, subM, subN, false, useGpu_); - MatrixPtr A = Matrix::create(wgtData, subM, subK, true, useGpu_); - C->mul(*A, *B); // mul - - // clear the temporary matrix - A->clear(); - B->clear(); - C->clear(); - - expandInData += subK * subN; - localGradData += subM * subN; - wgtData += subK * subM; - } - - // shrink one frame outGrad - MatrixPtr oneGradTmp = Matrix::create( - expandInput_->getData(), subK * groups_[inpIdx], subN, false, useGpu_); - MatrixPtr vTmp = - Matrix::create(tgtGradData, - 1, - imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel, - false, - useGpu_); - vTmp->convShrink(*oneGradTmp, - imgSizeH_[inpIdx], - imgSizeW_[inpIdx], - channel, - filterSizeY_[inpIdx], - filterSize_[inpIdx], - strideY_[inpIdx], - stride_[inpIdx], - paddingY_[inpIdx], - padding_[inpIdx], - outputH_[inpIdx], - outputW_[inpIdx], - 1.0f, - 1.0f); - vTmp->clear(); - oneGradTmp->clear(); - - // move the data-pointer - tgtGradData += imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel; - } -} - -void ExpandConvBaseLayer::bpropWeights(MatrixPtr image, - MatrixPtr out, - int inpIdx) { - MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); - - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - size_t batchSize = image->getHeight(); - resetExpandInput(subK * groups_[inpIdx], subN); - - real *gradData = out->getData(); - - for (size_t n = 0; n < batchSize; n++) { // frame by frame - // expand - expandOneFrame(image, n, inpIdx); - real *wGradData = weightGrad->getData(); - real *expandInData = expandInput_->getData(); - - // expand-mul one-group by one - for (int g = 0; g < groups_[inpIdx]; g++) { - MatrixPtr A = Matrix::create(expandInData, subK, subN, true, useGpu_); - MatrixPtr B = Matrix::create(gradData, subM, subN, false, useGpu_); - MatrixPtr C = Matrix::create(wGradData, subM, subK, false, useGpu_); - C->mul(*B, *A, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - gradData += subM * subN; - wGradData += subK * subM; - expandInData += subK * subN; - } - } -} - void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { size_t mapW = getOutputSize() / numFilters_; size_t mapH = v->getElementCnt() / mapW; diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h index aabcdfc392d3e242df84c820c336d8b32c7cb04f..01c699d2344443a1887ec0b5005125f617cbe279 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.h +++ b/paddle/gserver/layers/ExpandConvBaseLayer.h @@ -26,19 +26,6 @@ namespace paddle { */ class ExpandConvBaseLayer : public ConvBaseLayer { protected: - /// For expand convolution. - /// subM_ = numFilters_ / groups_. - IntV subM_; - /// subN_ = outputH_ * outputW_. - IntV subN_; - /// subK_ = channels_ * filterPixels_ * groups_. - IntV subK_; - - /*The expandInput_ and transOutValue_ are used for CPU expand conv calc - * Expand one sample at a time. 
shape: - * (numChannels * filterPixels_, outputSizeH * outputSizeW) - * */ - MatrixPtr expandInput_; /// The transpose of output, which is an auxiliary matrix. MatrixPtr transOutValue_; @@ -52,10 +39,6 @@ public: const ParameterMap& parameterMap) override; size_t getOutputSize(); - /** - * Create or resize expandInput_. - */ - void resetExpandInput(size_t height, size_t width); /** * Add shared bias. @@ -66,20 +49,9 @@ public: * Add unshared bias. */ void addUnsharedBias(); - /** - * Expand one input sample. - */ - void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx); - - /** - * Expand one input sample and perform matrix multiplication. - */ - void expandFwdOnce(MatrixPtr image, MatrixPtr out, int inIdx, int startIdx); void bpropSharedBias(MatrixPtr biases, MatrixPtr v); void bpropBiases(MatrixPtr v); - void bpropWeights(MatrixPtr image, MatrixPtr out, int inpIdx); - void bpropActs(MatrixPtr image, MatrixPtr out, int inpIdx); }; } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index f9267b81a7d4264f5f43552e3d54a45e4b212e00..914689e66cdb8947e886e17e75829183c1af1a42 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -18,32 +18,94 @@ limitations under the License. */ namespace paddle { +/* + * The calculation of the exconvt(convolution transpose (deconv) operation) + * is a swap of forward and backward of the calculation of exconv. + * */ REGISTER_LAYER(exconv, ExpandConvLayer); +REGISTER_LAYER(exconvt, ExpandConvLayer); bool ExpandConvLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ ExpandConvBaseLayer::init(layerMap, parameterMap); + + size_t numInputs = config_.inputs_size(); + inputShape_.resize(numInputs); + filterShape_.resize(numInputs); + outputShape_.resize(numInputs); + for (int i = 0; i < config_.inputs_size(); i++) { + std::vector paddings = {(size_t)paddingY_[i], (size_t)padding_[i]}; + std::vector strides = {(size_t)strideY_[i], (size_t)stride_[i]}; + createFunction(forward_, + !isDeconv_ ? "GemmConv" : "GemmConvGradInput", + FuncConfig() + .set("paddings", paddings) + .set("strides", strides) + .set("groups", (size_t)groups_[i])); + + createFunction(backward_, + !isDeconv_ ? "GemmConvGradInput" : "GemmConv", + FuncConfig() + .set("paddings", paddings) + .set("strides", strides) + .set("groups", (size_t)groups_[i])); + + createFunction(backward_, + "GemmConvGradFilter", + FuncConfig() + .set("paddings", paddings) + .set("strides", strides) + .set("groups", (size_t)groups_[i])); + } return true; } +// i is the index of input layers +#define BACKWARD_INPUT(i, inputs, outputs) \ + backward_[2 * i]->calc(inputs, outputs) +#define BACKWARD_FILTER(i, inputs, outputs) \ + backward_[2 * i + 1]->calc(inputs, outputs) + void ExpandConvLayer::forward(PassType passType) { Layer::forward(passType); - /* malloc memory for the output_ if necessary */ - int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); + size_t batchSize = inputLayers_[0]->getOutputValue()->getHeight(); resetOutput(batchSize, getOutputSize()); - MatrixPtr image = nullptr; - MatrixPtr outV = getOutputValue(); + // Calculate the shape of the input, output, and filter. 
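Before the loop below fills in those shapes, it may help to see the shape arithmetic behind the exconv/exconvt swap registered above as a small sketch; these are the textbook convolution relations and may not cover every corner case (for example caffe_mode rounding) handled by ConvBaseLayer::calOutputSize.

```python
def conv_output_size(image_size, filter_size, stride, padding):
    """Spatial output size of a forward (exconv) convolution."""
    return (image_size + 2 * padding - filter_size) // stride + 1

def conv_trans_output_size(image_size, filter_size, stride, padding):
    """Spatial output size of the transposed (exconvt) convolution: the
    input size a forward conv would need in order to emit `image_size`."""
    return (image_size - 1) * stride + filter_size - 2 * padding

# The swap mirrors the registrations above: exconv runs "GemmConv" forward
# and "GemmConvGradInput" for its input gradient, while exconvt runs
# "GemmConvGradInput" forward and "GemmConv" for its input gradient.
assert conv_output_size(conv_trans_output_size(7, 3, 2, 1), 3, 2, 1) == 7
```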
for (size_t i = 0; i < inputLayers_.size(); ++i) { - LayerPtr prevLayer = getPrev(i); - image = prevLayer->getOutputValue(); - for (size_t off = 0; off < image->getHeight(); off++) { - REGISTER_TIMER_INFO("expandFwdOnce", getName().c_str()); - expandFwdOnce(image, outV, i, off); - } + inputShape_[i] = TensorShape({(size_t)batchSize, + (size_t)channels_[i], + (size_t)imgSizeH_[i], + (size_t)imgSizeW_[i]}); + filterShape_[i] = + TensorShape({(size_t)groups_[i], + !isDeconv_ ? (size_t)numFilters_ / groups_[i] + : (size_t)channels_[i] / groups_[i], + !isDeconv_ ? (size_t)channels_[i] / groups_[i] + : (size_t)numFilters_ / groups_[i], + (size_t)filterSizeY_[i], + (size_t)filterSize_[i]}); + outputShape_[i] = TensorShape({(size_t)batchSize, + (size_t)numFilters_, + (size_t)outputH_[i], + (size_t)outputW_[i]}); } + + // Calculate the output value. + for (size_t i = 0; i < inputLayers_.size(); ++i) { + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(*getInputValue(i), inputShape_[i]); + inputs.addArg(*weights_[i]->getW(), filterShape_[i]); + outputs.addArg(*getOutputValue(), + outputShape_[i], + !isDeconv_ && i == 0 ? ASSIGN_TO : ADD_TO); + + forward_[i]->calc(inputs, outputs); + } + /* add the bias-vector */ if (biases_.get()) { if (sharedBiases_) { @@ -67,14 +129,30 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) { biases_->getParameterPtr()->incUpdate(callback); } + // Calculate the input grad and filter grad. for (size_t i = 0; i < inputLayers_.size(); ++i) { - /* First, calculate the input layers error */ - if (getPrev(i)->getOutputGrad()) { - bpropActs(outGrad, getPrev(i)->getOutputGrad(), i); + if (getInputGrad(i)) { + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(*getOutputGrad(), outputShape_[i]); + inputs.addArg(*weights_[i]->getW(), filterShape_[i]); + outputs.addArg(*getInputGrad(i), inputShape_[i], ADD_TO); + BACKWARD_INPUT(i, inputs, outputs); } + if (weights_[i]->getWGrad()) { - /* Then, calculate the W-gradient for the current layer */ - bpropWeights(getPrev(i)->getOutputValue(), outGrad, i); + BufferArgs inputs; + BufferArgs outputs; + if (!isDeconv_) { + inputs.addArg(*getOutputGrad(), outputShape_[i]); + inputs.addArg(*getInputValue(i), inputShape_[i]); + } else { + inputs.addArg(*getInputValue(i), inputShape_[i]); + inputs.addArg(*getOutputGrad(), outputShape_[i]); + } + outputs.addArg(*weights_[i]->getWGrad(), filterShape_[i], ADD_TO); + BACKWARD_FILTER(i, inputs, outputs); + /* Increasing the number of gradient */ weights_[i]->getParameterPtr()->incUpdate(callback); } diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index 60681690e5dd55b2e9aa4e1f25758db6033665a6..a1f943d1521547af0f82cec7da8a4efe9037cd71 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -40,6 +40,11 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; + +protected: + std::vector inputShape_; + std::vector filterShape_; + std::vector outputShape_; }; } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp deleted file mode 100644 index 520586b13889790c94a3e29902a4ea0ee55e8555..0000000000000000000000000000000000000000 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "ExpandConvTransLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" - -/* The implementation of the convTransLayer is basically a swap of forward and - * backward of the original convLayer. - * The variable naming follows the convention of the convLayer. - * */ - -namespace paddle { - -REGISTER_LAYER(exconvt, ExpandConvTransLayer); - -bool ExpandConvTransLayer::init(const LayerMap &layerMap, - const ParameterMap ¶meterMap) { - /* Initialize the basic convolutional parent class */ - ExpandConvBaseLayer::init(layerMap, parameterMap); - - return true; -} - -void ExpandConvTransLayer::forward(PassType passType) { - Layer::forward(passType); - - /* malloc memory for the output_ if necessary */ - int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); - resetOutput(batchSize, getOutputSize()); - - MatrixPtr output = nullptr; - for (size_t i = 0; i < inputLayers_.size(); ++i) { - LayerPtr prevLayer = getPrev(i); - output = prevLayer->getOutputValue(); - REGISTER_TIMER_INFO("shrinkFwd", getName().c_str()); - bpropActs(output, getOutputValue(), i); - } - - /* add the bias-vector */ - if (biases_.get()) { - if (sharedBiases_) { - addSharedBias(); - } else { - addUnsharedBias(); - } - } - - /* activation */ - forwardActivation(); -} - -void ExpandConvTransLayer::backward(const UpdateCallback &callback) { - backwardActivation(); - - MatrixPtr imageGrad = getOutputGrad(); - if (biases_ && biases_->getWGrad()) { - bpropBiases(imageGrad); - /* Increasing the number of gradient */ - biases_->getParameterPtr()->incUpdate(callback); - } - - for (size_t i = 0; i < inputLayers_.size(); ++i) { - /* First, calculate the input layers error */ - for (size_t off = 0; off < imageGrad->getHeight(); off++) { - if (getPrev(i)->getOutputGrad()) { - expandFwdOnce(imageGrad, getPrev(i)->getOutputGrad(), i, off); - } - } - if (weights_[i]->getWGrad()) { - /* Then, calculate the W-gradient for the current layer */ - bpropWeights(imageGrad, getPrev(i)->getOutputValue(), i); - /* Increasing the number of gradient */ - weights_[i]->getParameterPtr()->incUpdate(callback); - } - } -} - -} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h deleted file mode 100644 index 00b8f241889fdd3f423d75dedd9068aa3674f190..0000000000000000000000000000000000000000 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "ExpandConvBaseLayer.h" -#include "paddle/math/Matrix.h" - -namespace paddle { - -/** - * @brief A subclass of convolution layer. - * This layer expands input and use matrix multiplication to - * calculate convolution transpose (deconv) operation. - * - * The config file api is img_conv_layer with flag trans=True. - */ -class ExpandConvTransLayer : public ExpandConvBaseLayer { -public: - explicit ExpandConvTransLayer(const LayerConfig& config) - : ExpandConvBaseLayer(config) {} - - ~ExpandConvTransLayer() {} - - bool init(const LayerMap& layerMap, - const ParameterMap& parameterMap) override; - - void forward(PassType passType) override; - void backward(const UpdateCallback& callback) override; -}; - -} // namespace paddle diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index d07299bfe3c4147742384a45dc6f1698d9c382f4..83fcfed46cd568d22237eeef9c0215e4e3ad2666 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 40bb1e2d73c81280a9b12114c13de851285c276b..6035a866b4eee4c6a61fa93f3adbf5e1d2d549f7 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/math/MathUtils.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index 54b72375b743fe025e0ded5fdbce5699a0b4be1a..e7325e0cc3b7195b5fec77c878e3e087cfc643e0 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/math/MathUtils.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" diff --git a/paddle/parameter/tests/test_argument.cpp b/paddle/parameter/tests/test_argument.cpp index 81fe4ee397351a013c8616ad08fb8cb4b8dae4d0..98ab013548734059060eb06ce1a7cec23dbf1b72 100644 --- a/paddle/parameter/tests/test_argument.cpp +++ b/paddle/parameter/tests/test_argument.cpp @@ -42,7 +42,7 @@ TEST(Argument, poolSequenceWithStride) { CHECK_EQ(outStart[3], 4); CHECK_EQ(outStart[4], 7); - CHECK_EQ(stridePositions->getSize(), 8); + CHECK_EQ(stridePositions->getSize(), 8UL); auto result = reversed ? 
strideResultReversed : strideResult; for (int i = 0; i < 8; i++) { CHECK_EQ(stridePositions->getData()[i], result[i]); diff --git a/paddle/trainer/CMakeLists.txt b/paddle/trainer/CMakeLists.txt index 06c019f0a97757b658d1bc3405246d8f47632aad..9d246b6690134d96e9a262c6ac64d998536128a9 100644 --- a/paddle/trainer/CMakeLists.txt +++ b/paddle/trainer/CMakeLists.txt @@ -4,6 +4,7 @@ set(TRAINER_SOURCES ParameterUpdater.cpp ParamUtil.cpp RemoteParameterUpdater.cpp + NewRemoteParameterUpdater.cpp Tester.cpp Trainer.cpp TrainerInternal.cpp @@ -16,6 +17,7 @@ set(TRAINER_HEADERS ParameterUpdater.h ParamUtil.h RemoteParameterUpdater.h + NewRemoteParameterUpdater.h Tester.h TesterConfig.h Trainer.h @@ -32,7 +34,7 @@ add_style_check_target(paddle_trainer_lib add_style_check_target(paddle_trainer_lib ${TRAINER_HEADERS}) add_dependencies(paddle_trainer_lib - gen_proto_cpp) + gen_proto_cpp paddle_pserver_cclient_lib) macro(add_paddle_exe TARGET_NAME) add_executable(${TARGET_NAME} ${ARGN}) @@ -56,3 +58,10 @@ install(TARGETS paddle_trainer paddle_merge_model set_target_properties(paddle_trainer PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE) set_target_properties(paddle_merge_model PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE) + +if(APPLE) + set(CMAKE_EXE_LINKER_FLAGS "-framework CoreFoundation -framework Security") +endif() + +target_link_libraries(paddle_trainer ${CMAKE_CURRENT_SOURCE_DIR}/libpaddle_pserver_cclient.a) +target_link_libraries(paddle_trainer_lib ${CMAKE_CURRENT_SOURCE_DIR}/libpaddle_pserver_cclient.a) diff --git a/paddle/trainer/NewRemoteParameterUpdater.cpp b/paddle/trainer/NewRemoteParameterUpdater.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f25ce2f7f06f6da0feab27da61b8e49689cbe213 --- /dev/null +++ b/paddle/trainer/NewRemoteParameterUpdater.cpp @@ -0,0 +1,86 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "NewRemoteParameterUpdater.h" +#include "Trainer.h" +#include "paddle/utils/Stat.h" + +DECLARE_int32(trainer_id); +DECLARE_string(save_dir); + +namespace paddle { +NewRemoteParameterUpdater::NewRemoteParameterUpdater( + const OptimizationConfig &config, const std::string pserverSpec) + : parameterClient_(-1), + newParameters_(nullptr), + newGradients_(nullptr), + pserverSpec_(pserverSpec) {} + +void NewRemoteParameterUpdater::init( + const std::vector ¶meters) { + ParameterUpdater::init(parameters); + + for (auto ¶ : parameters_) { + para->getBuf(PARAMETER_VALUE)->zeroMem(); + para->getBuf(PARAMETER_GRADIENT)->zeroMem(); + } + + // create parameter server client. + parameterClient_ = paddle_new_pserver_client((char *)pserverSpec_.c_str(), + FLAGS_trainer_id == 0); + + // init new parameter and gradient. + newParameters_ = initNewParameter(PARAMETER_VALUE); + newGradients_ = initNewParameter(PARAMETER_GRADIENT); + + // init parameter, one trainer will get the opportunity to int parameter and + // send them to parameter server. 
Others will get the initialized parameter + // from parameter server + if (paddle_begin_init_params(parameterClient_)) { + LOG(INFO) << "paddle_begin_init_params start"; + for (int i = 0; i < parameterSize(); ++i) { + auto paramConfig = parameters_[i]->getConfig(); + std::string bytes = paramConfig.SerializeAsString(); + const char *array = bytes.data(); + int size = (int)bytes.size(); + paddle_init_param( + parameterClient_, *newParameters_[i], (void *)array, size); + } + paddle_finish_init_params(parameterClient_); + LOG(INFO) << "paddle_begin_init_params done"; + } else { + paddle_get_params(parameterClient_, newParameters_, parameterSize()); + } + + LOG(INFO) << "NewRemoteParameterUpdater initialized"; +} + +void NewRemoteParameterUpdater::updateImpl(Parameter *para) {} + +void NewRemoteParameterUpdater::finishBatch(real cost) { + // send gradient to parameter server. + paddle_send_grads(parameterClient_, newGradients_, parameterSize()); + // get the updated parameter from parameterClient. + paddle_get_params(parameterClient_, newParameters_, parameterSize()); + + // clear gradient after update parameter. + for (auto ¶ : parameters_) { + para->getBuf(PARAMETER_GRADIENT)->zeroMem(); + } +} + +void NewRemoteParameterUpdater::startPass() {} + +bool NewRemoteParameterUpdater::finishPass() { return true; } +} diff --git a/paddle/trainer/NewRemoteParameterUpdater.h b/paddle/trainer/NewRemoteParameterUpdater.h new file mode 100644 index 0000000000000000000000000000000000000000..f735185f62b3491a63e34cfc4a2ef73dae12243e --- /dev/null +++ b/paddle/trainer/NewRemoteParameterUpdater.h @@ -0,0 +1,114 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include "ParameterUpdater.h" +#include "libpaddle_pserver_cclient.h" +#include "paddle/pserver/ParameterClient2.h" +#include "paddle/utils/Queue.h" +#include "paddle/utils/Util.h" + +namespace paddle { + +/** + * New remote parameter updater for dense parameters that use cclient of go. + */ +class NewRemoteParameterUpdater : public ParameterUpdater { +public: + NewRemoteParameterUpdater(const OptimizationConfig& config, + const std::string pserverSpec); + ~NewRemoteParameterUpdater() { + releaseNewParameter(newParameters_); + releaseNewParameter(newGradients_); + if (parameterClient_ >= 0) paddle_pserver_client_release(parameterClient_); + } + + /** + * initialize the internal parameter client and itself. + */ + virtual void init(const std::vector& parameters); + /** + * @brief start batch + * + * @note one batch training exhibits stateful feature to help + * to do performance tuning, sgd optimization if necessary. + */ + virtual PassType startBatch(int64_t batchSize) { return PASS_TRAIN; } + + /** + * send parameters to pservers and get returned parameters + * from all pservers if necessary. 
+ */ + virtual void finishBatch(real cost); + virtual void startPass(); + virtual bool finishPass(); + +protected: + /** + * work need to do after finishBatch + */ + virtual void updateImpl(Parameter* para); + +private: + int parameterSize() { return (int)parameters_.size(); } + + /** + * init parameter of go paddle pserver cclient. + * @param new_params + * @param type + */ + paddle_parameter** initNewParameter(ParameterType type) { + paddle_parameter** new_params = + (paddle_parameter**)malloc(sizeof(paddle_parameter*) * parameterSize()); + for (int i = 0; i < parameterSize(); ++i) { + new_params[i] = (paddle_parameter*)malloc(sizeof(paddle_parameter)); + memset(new_params[i], 0, sizeof(paddle_parameter)); + } + + for (int i = 0; i < parameterSize(); ++i) { + ParameterPtr param = parameters_[i]; + new_params[i]->element_type = PADDLE_ELEMENT_TYPE_FLOAT32; + new_params[i]->name = (char*)param->getName().c_str(); + new_params[i]->content = + (unsigned char*)(param->getBuf(type).get()->getData()); + new_params[i]->content_len = + (int)param->getBuf(type).get()->getSize() * sizeof(real); + } + return new_params; + } + + void releaseNewParameter(paddle_parameter** newParams) { + if (newParams != nullptr) { + for (int i = 0; i < parameterSize(); ++i) { + free(newParams[i]); + } + free(newParams); + } + } + +protected: + /// internal parameter client object for exchanging data with pserver + paddle_pserver_client parameterClient_; + /// the parameters for new pserver client + paddle_parameter** newParameters_; + /// the gradinets for new pserver client + paddle_parameter** newGradients_; + /// the specification of parameter server "host1:port,host1:port" + std::string pserverSpec_; +}; + +} // namespace paddle diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 3640dd3a75ea212a84255ea7f6369b63606482ab..0e17c42d34f147db190ac5e5ccd5339360cc35bb 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -18,7 +18,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT_DIR}/.timestamp - DEPENDS gen_proto_py ${PY_FILES} ${external_project_dependencies}) + DEPENDS gen_proto_py ${PY_FILES} ${external_project_dependencies} paddle_master_shared) add_custom_target(paddle_python ALL DEPENDS ${OUTPUT_DIR}/.timestamp) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 0792e2d40b43f5fb2de8d6bb43a62cfa23f77082..fc2e3bbcde0e94b6325bd0ca1fd41e088df0b950 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -126,6 +126,7 @@ def init_config_environment( g_config=TrainerConfig(), g_layer_map={}, g_parameter_map={}, + g_parameter_initializer_map={}, g_extended_config_funcs={}, # store command args of paddle_trainer @@ -439,22 +440,22 @@ def model_type(name): @config_class class Bias(Cfg): - def __init__( - self, - parameter_name=None, - learning_rate=None, - momentum=None, - decay_rate=None, - decay_rate_l1=None, - initial_mean=None, - initial_std=None, - initial_strategy=None, - initial_smart=None, - num_batches_regularization=None, - sparse_remote_update=None, - gradient_clipping_threshold=None, - is_static=None, - is_shared=None, ): + def __init__(self, + parameter_name=None, + learning_rate=None, + momentum=None, + decay_rate=None, + decay_rate_l1=None, + initial_mean=None, + initial_std=None, + initial_strategy=None, + initial_smart=None, 
+ num_batches_regularization=None, + sparse_remote_update=None, + gradient_clipping_threshold=None, + is_static=None, + is_shared=None, + initializer=None): self.add_keys(locals()) @@ -465,6 +466,7 @@ class Input(Cfg): self, input_layer_name, parameter_name=None, + initializer=None, learning_rate=None, momentum=None, decay_rate=None, @@ -521,6 +523,7 @@ class Projection(Input): initial_std=None, initial_strategy=None, initial_smart=None, + initializer=None, num_batches_regularization=None, sparse_remote_update=None, sparse_update=None, @@ -1479,7 +1482,8 @@ class LayerBase(object): gradient_clipping_threshold=bias. gradient_clipping_threshold, is_static=bias.is_static, - is_shared=bias.is_shared, ) + is_shared=bias.is_shared, + initializer=bias.initializer) if for_self: self.config.bias_parameter_name = bias.parameter_name else: @@ -1536,7 +1540,8 @@ class LayerBase(object): format=format, is_static=input_config.is_static, is_shared=input_config.is_shared, - update_hooks=input_config.update_hooks) + update_hooks=input_config.update_hooks, + initializer=input_config.initializer) def set_layer_size(self, size): if self.config.size == 0: @@ -3221,7 +3226,8 @@ def Parameter(name, need_compact=None, is_static=None, is_shared=None, - update_hooks=None): + update_hooks=None, + initializer=None): config_assert(name not in g_parameter_map, 'Duplicated parameter name: ' + name) @@ -3309,6 +3315,11 @@ def Parameter(name, para.update_hooks.extend(update_hooks) g_parameter_map[name] = para + if initializer is not None: + config_assert( + callable(initializer), + "parameter initializer should be a callable object") + g_parameter_initializer_map[name] = initializer @config_func diff --git a/python/paddle/trainer_config_helpers/attrs.py b/python/paddle/trainer_config_helpers/attrs.py index d1167a234caed3753c6beedfc89b01054e3688e1..4100697c9c3770f1b748ea630d5f8193167fe7fc 100644 --- a/python/paddle/trainer_config_helpers/attrs.py +++ b/python/paddle/trainer_config_helpers/attrs.py @@ -95,6 +95,10 @@ class ParameterAttribute(object): :param sparse_update: Enable sparse update for this parameter. It will enable both local and remote sparse update. :type sparse_update: bool + :param initializer: If not None, it should be a callable object which accepts + a parameter name and returns numpy array for the initial + value of the parameter + :param initializer: callable object """ def __init__(self, @@ -109,7 +113,8 @@ class ParameterAttribute(object): learning_rate=None, momentum=None, gradient_clipping_threshold=None, - sparse_update=False): + sparse_update=False, + initializer=None): self.attr = {} if is_static: @@ -161,6 +166,8 @@ class ParameterAttribute(object): is_compatible_with(gradient_clipping_threshold, float): self.attr['gradient_clipping_threshold'] = \ gradient_clipping_threshold + if initializer is not None: + self.attr['initializer'] = initializer def set_default_parameter_name(self, name): """ diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py index b9d0a7f29138cae281236b26509a56738f3801f4..102331c0bb6477cbeb618f015aad76a0414723ba 100644 --- a/python/paddle/v2/__init__.py +++ b/python/paddle/v2/__init__.py @@ -26,6 +26,7 @@ import evaluator from . import dataset from . import reader from . import plot +from . 
import master import attr import op import pooling @@ -37,9 +38,26 @@ import plot import image __all__ = [ - 'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer', - 'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader', - 'topology', 'networks', 'infer', 'plot', 'evaluator', 'image' + 'optimizer', + 'layer', + 'activation', + 'parameters', + 'init', + 'trainer', + 'event', + 'data_type', + 'attr', + 'pooling', + 'data_feeder', + 'dataset', + 'reader', + 'topology', + 'networks', + 'infer', + 'plot', + 'evaluator', + 'image', + 'master', ] diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index 418b592a5ac638cc61b86a9b3fbdcee1e3a0bcaf..9c614914b5e372e8e5e3c3c072b18b83edf51e87 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -149,3 +149,57 @@ def cluster_files_reader(files_pattern, yield line return reader + + +def convert(output_path, + reader, + num_shards, + name_prefix, + max_lines_to_shuffle=1000): + import recordio + import cPickle as pickle + import random + """ + Convert data from reader to recordio format files. + + :param output_path: directory in which output files will be saved. + :param reader: a data reader, from which the convert program will read data instances. + :param num_shards: the number of shards that the dataset will be partitioned into. + :param name_prefix: the name prefix of generated files. + :param max_lines_to_shuffle: the max lines numbers to shuffle before writing. + """ + + assert num_shards >= 1 + assert max_lines_to_shuffle >= 1 + + def open_writers(): + w = [] + for i in range(0, num_shards): + n = "%s/%s-%05d-of-%05d" % (output_path, name_prefix, i, + num_shards - 1) + w.append(recordio.writer(n)) + + return w + + def close_writers(w): + for i in range(0, num_shards): + w[i].close() + + def write_data(w, lines): + random.shuffle(lines) + for i, d in enumerate(lines): + d = pickle.dumps(d, pickle.HIGHEST_PROTOCOL) + w[i % num_shards].write(d) + + w = open_writers() + lines = [] + + for i, d in enumerate(reader()): + lines.append(d) + if i % max_lines_to_shuffle == 0 and i >= max_lines_to_shuffle: + write_data(w, lines) + lines = [] + continue + + write_data(w, lines) + close_writers(w) diff --git a/python/paddle/v2/dataset/tests/common_test.py b/python/paddle/v2/dataset/tests/common_test.py index f9815d4f9e1ee3bbe9ccf2dae588c51c262468c1..cfa194eba38ea70311c4deeac2635dc0a0103576 100644 --- a/python/paddle/v2/dataset/tests/common_test.py +++ b/python/paddle/v2/dataset/tests/common_test.py @@ -57,6 +57,38 @@ class TestCommon(unittest.TestCase): for idx, e in enumerate(reader()): self.assertEqual(e, str("0")) + def test_convert(self): + record_num = 10 + num_shards = 4 + + def test_reader(): + def reader(): + for x in xrange(record_num): + yield x + + return reader + + path = tempfile.mkdtemp() + paddle.v2.dataset.common.convert(path, + test_reader(), num_shards, + 'random_images') + + files = glob.glob(path + '/random_images-*') + self.assertEqual(len(files), num_shards) + + recs = [] + for i in range(0, num_shards): + n = "%s/random_images-%05d-of-%05d" % (path, i, num_shards - 1) + r = recordio.reader(n) + while True: + d = r.read() + if d is None: + break + recs.append(d) + + recs.sort() + self.assertEqual(total, record_num) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/master/.gitignore b/python/paddle/v2/master/.gitignore new file mode 100644 index 
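As a usage note for the convert helper added to paddle.v2.dataset.common above (it depends on the recordio package), the following sketch shards a toy reader into recordio files; the output directory and file prefix are placeholders.

```python
import os
import paddle.v2.dataset.common as common

def toy_reader():
    # A paddle-style reader: a no-argument callable that yields one record
    # per iteration (plain integers here, but any picklable object works).
    for i in range(1000):
        yield i

out_dir = "/tmp/recordio_demo"  # placeholder; convert expects it to exist
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Shuffle up to 1000 records at a time and write them into four shards,
# named /tmp/recordio_demo/demo-00000-of-00003 ... demo-00003-of-00003.
common.convert(out_dir, toy_reader, 4, "demo")
```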
0000000000000000000000000000000000000000..a3ac6e1a33e74631136fc95574532284db7cd7cd --- /dev/null +++ b/python/paddle/v2/master/.gitignore @@ -0,0 +1,3 @@ +*.whl +*.so +*.pyc diff --git a/python/paddle/v2/master/__init__.py b/python/paddle/v2/master/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c8975b5d4a33cbecb4fa5a144bc610c36591d629 --- /dev/null +++ b/python/paddle/v2/master/__init__.py @@ -0,0 +1,3 @@ +from client import * + +__all__ = ['client'] diff --git a/python/paddle/v2/master/client.py b/python/paddle/v2/master/client.py new file mode 100644 index 0000000000000000000000000000000000000000..de8e9bb88e1064e41a80e0ef7838e307089a1331 --- /dev/null +++ b/python/paddle/v2/master/client.py @@ -0,0 +1,39 @@ +import ctypes +import os + +path = os.path.join(os.path.dirname(__file__), "libpaddle_master.so") +lib = ctypes.cdll.LoadLibrary(path) + + +class client(object): + """ + client is a client to the master server. + """ + + def __init__(self, addr, buf_size): + self.c = lib.paddle_new_master_client(addr, buf_size) + + def close(self): + lib.paddle_release_master_client(self.c) + self.c = None + + def set_dataset(self, paths): + holder_type = ctypes.c_char_p * len(paths) + holder = holder_type() + print paths + for idx, path in enumerate(paths): + c_ptr = ctypes.c_char_p(path) + holder[idx] = c_ptr + lib.paddle_set_dataset(self.c, holder, len(paths)) + + def next_record(self): + p = ctypes.c_char_p() + ret = ctypes.pointer(p) + size = lib.paddle_next_record(self.c, ret) + if size == 0: + # Empty record + return "" + record = ret.contents.value[:size] + # Memory created from C should be freed. + lib.mem_free(ret.contents) + return record diff --git a/python/paddle/v2/optimizer.py b/python/paddle/v2/optimizer.py index 5e99d4a241b7fe2b0f9ff4ba191db4b341c4d30e..1ef2dceca910e806bddf17c95d1c345a144d9e31 100644 --- a/python/paddle/v2/optimizer.py +++ b/python/paddle/v2/optimizer.py @@ -45,7 +45,12 @@ class Optimizer(object): return swig_api.ParameterUpdater.createRemoteUpdater( self.__opt_conf__, pass_num, use_sparse_updater) - def create_updater(self, is_local, num_passes, use_sparse_updater): + def __create_new_remote_updater__(self, pserver_spec): + return swig_api.ParameterUpdater.createNewRemoteUpdater( + self.__opt_conf__, pserver_spec) + + def create_updater(self, is_local, num_passes, use_sparse_updater, + pserver_spec): """ create proper parameter_updater by configuration. 
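The new paddle.v2.master.client defined just above might be driven like this; the master address, buffer size, file pattern and the process() handler are all illustrative assumptions rather than values from this change.

```python
import glob
from paddle.v2.master import client

c = client("localhost:8080", 100)  # master address and buffer size are made up

# Register the recordio shards that make up the dataset with the master ...
c.set_dataset(glob.glob("/tmp/recordio_demo/demo-*"))

# ... then pull records until the master hands back an empty record.
while True:
    record = c.next_record()
    if record == "":
        break
    process(record)  # hypothetical per-record handler

c.close()
```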
:param is_local: create local or remote parameter updater @@ -64,8 +69,12 @@ class Optimizer(object): if is_local: parameter_updater = self.__create_local_updater__() else: - parameter_updater = self.__create_remote_updater__( - num_passes, use_sparse_updater) + if pserver_spec is None: + parameter_updater = self.__create_remote_updater__( + num_passes, use_sparse_updater) + else: + parameter_updater = self.__create_new_remote_updater__( + pserver_spec) return parameter_updater diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py index 64805d0c504b876f4d1f6657fe94457534a0b278..ad20241b98302f136326ae491c6723a6c12ae284 100644 --- a/python/paddle/v2/parameters.py +++ b/python/paddle/v2/parameters.py @@ -1,6 +1,7 @@ import numpy as np import py_paddle.swig_paddle as api from paddle.proto.ParameterConfig_pb2 import ParameterConfig +import paddle.trainer.config_parser as cp import struct import tarfile import cStringIO @@ -18,8 +19,11 @@ def create(layers): """ topology = Topology(layers) pool = Parameters() + initializers = cp.g_parameter_initializer_map for param in topology.proto().parameters: pool.__append_config__(param) + if param.name in initializers: + pool[param.name] = initializers[param.name](param.name) return pool diff --git a/python/paddle/v2/tests/test_parameters.py b/python/paddle/v2/tests/test_parameters.py index ebb182caab6430862a8e4da2ae4ea6b1e72f726c..45372e7dd0ec7cbdd6a2eb5c0397ef7e74284cd0 100644 --- a/python/paddle/v2/tests/test_parameters.py +++ b/python/paddle/v2/tests/test_parameters.py @@ -11,6 +11,9 @@ except ImportError: sys.exit(0) import paddle.v2.parameters as parameters +import paddle.v2.data_type as data_type +import paddle.v2.layer as layer +from paddle.v2.attr import ParamAttr from paddle.proto.ParameterConfig_pb2 import ParameterConfig import random import cStringIO @@ -55,6 +58,25 @@ class TestParameters(unittest.TestCase): p1 = params_dup.get(name) self.assertTrue(numpy.isclose(p0, p1).all()) + def test_initializer(self): + def initializer(name): + assert name == "fc.w" + mat = numpy.ones((3, 2), dtype=numpy.float32) + mat[1, 1] = 2 + return mat + + x = layer.data(name="x", type=data_type.dense_vector(3)) + y = layer.fc(x, + size=2, + bias_attr=False, + param_attr=ParamAttr( + name="fc.w", initializer=initializer)) + params = parameters.create(y) + val = params["fc.w"] + assert val.shape == (3, 2) + expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32) + assert numpy.logical_and.reduce(numpy.reshape(val == expected, 6)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 8fdb67cc2688a67ed815af396b214e339195c73f..f9658a8c5df9562073c8a187074a6cb3459ac5d9 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -49,7 +49,8 @@ class SGD(object): parameters, update_equation, extra_layers=None, - is_local=True): + is_local=True, + pserver_spec=None): if not isinstance(parameters, v2_parameters.Parameters): raise TypeError('parameters should be parameters') @@ -63,6 +64,7 @@ class SGD(object): self.__parameters__ = parameters self.__topology_in_proto__ = topology.proto() self.__is_local__ = is_local + self.__pserver_spec__ = pserver_spec self.__use_sparse_updater__ = self.__topology__.use_sparse_updater() # # In local mode, disable sparse_remote_update. 
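Tying together the initializer plumbing added above (ParameterAttribute.initializer, g_parameter_initializer_map, and the lookup in parameters.create), typical usage looks roughly like the new unit test; the layer name and shape below are only an example.

```python
import numpy as np
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)

def init_fc_weight(name):
    # Called once per parameter registered with this initializer; it must
    # return a numpy array shaped like the parameter (3x2 for this fc).
    assert name == "fc.w"
    return np.random.normal(0.0, 0.01, size=(3, 2)).astype(np.float32)

x = paddle.layer.data(name="x", type=paddle.data_type.dense_vector(3))
y = paddle.layer.fc(x,
                    size=2,
                    bias_attr=False,
                    param_attr=paddle.attr.ParamAttr(
                        name="fc.w", initializer=init_fc_weight))

# parameters.create() consults g_parameter_initializer_map and installs the
# returned array as the initial value of "fc.w".
params = paddle.parameters.create(y)
```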
@@ -126,7 +128,8 @@ class SGD(object): __check_train_args__(**locals()) self.__parameter_updater__ = self.__optimizer__.create_updater( - self.__is_local__, num_passes, self.__use_sparse_updater__) + self.__is_local__, num_passes, self.__use_sparse_updater__, + self.__pserver_spec__) self.__parameter_updater__.init(self.__gradient_machine__) self.__gradient_machine__.start() diff --git a/python/setup.py.in b/python/setup.py.in index 93724f918801ea706517a1df158ceb78a1c2335c..8fe1cfd8b338b9b2e47edcec6d66bbcdd38b5198 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,6 +1,5 @@ from setuptools import setup - packages=['paddle', 'paddle.proto', 'paddle.trainer', @@ -9,7 +8,8 @@ packages=['paddle', 'paddle.v2', 'paddle.v2.dataset', 'paddle.v2.reader', - 'paddle.v2.plot'] + 'paddle.v2.plot', + 'paddle.v2.master'] setup_requires=["requests", "numpy", @@ -25,7 +25,8 @@ setup(name='paddle', description='Parallel Distributed Deep Learning', install_requires=setup_requires, packages=packages, + package_data={'paddle.v2.master': ['libpaddle_master.so'], }, package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}' - } + }, )
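Finally, a rough end-to-end sketch of how the pserver_spec argument threaded through SGD and Optimizer.create_updater is meant to be used from the v2 API; the toy topology, optimizer settings and pserver addresses are placeholders, not part of this change.

```python
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)

# A toy classifier; any v2 topology works the same way.
x = paddle.layer.data(name="x", type=paddle.data_type.dense_vector(784))
y = paddle.layer.data(name="y", type=paddle.data_type.integer_value(10))
pred = paddle.layer.fc(input=x, size=10, act=paddle.activation.Softmax())
cost = paddle.layer.classification_cost(input=pred, label=y)

parameters = paddle.parameters.create(cost)
optimizer = paddle.optimizer.Adam()

trainer = paddle.trainer.SGD(
    cost=cost,
    parameters=parameters,
    update_equation=optimizer,
    is_local=False,
    # Comma-separated pserver addresses (placeholders). Passing a non-None
    # value makes Optimizer.create_updater pick the NewRemoteParameterUpdater.
    pserver_spec="192.168.0.10:8000,192.168.0.11:8000")

# trainer.train(reader=..., event_handler=..., num_passes=...) then runs
# against the remote parameter servers exactly as in local mode.
```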