diff --git a/.gitignore b/.gitignore index 2badc3bdaa52f2608183fa34393719be66630654..9e3a0b499f9f42856429f3a42bef313ea3df3699 100644 --- a/.gitignore +++ b/.gitignore @@ -25,12 +25,3 @@ third_party/ # clion workspace. cmake-build-* - -# generated while compiling -paddle/pybind/pybind.h -CMakeFiles -cmake_install.cmake -paddle/.timestamp -python/paddlepaddle.egg-info/ -paddle/fluid/pybind/pybind.h -python/paddle/version.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89c620bb2f7ef634fa80b64eec7037e8cb9a190c..6140340890c0e5025eb08209e8ea78df918b4dc0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,4 @@ +repos: - repo: https://github.com/Lucas-C/pre-commit-hooks.git sha: v1.0.1 hooks: @@ -25,6 +26,14 @@ entry: bash ./.clang_format.hook -i language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$ +- repo: local + hooks: + - id: cpplint-cpp-source + name: cpplint + description: Check C++ code style using cpplint.py. + entry: bash ./tools/codestyle/cpplint_pre_commit.hook + language: system + files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$ - repo: https://github.com/PaddlePaddle/pre-commit-golang sha: 8337620115c25ff8333f1b1a493bd031049bd7c0 hooks: diff --git a/.travis.yml b/.travis.yml index bf6a41d13c4eabc2d8543ab821ce0ff747a061df..929c847bd36d64e79a199b2634ebf68c3225429b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,7 @@ addons: - automake - libtool - ccache - ssh_known_hosts: 52.76.173.135 + ssh_known_hosts: 13.229.163.131 before_install: - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python diff --git a/CMakeLists.txt b/CMakeLists.txt index 1e11f86d0ee836f65e69c8398fb26c3b6a1070f6..de47086dbd6a440cd413c7843c83b1c69d9841b2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,6 +39,7 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND}) +option(WITH_TENSORRT "Compile PaddlePaddle with TensorRT support." OFF) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) @@ -53,8 +54,7 @@ option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF) -# TODO: Only compile PaddlePaddle fluid version by WITH_FLUID option. 
-option(WITH_FLUID "Compile PaddlePaddle fluid only(TODO)" OFF) +option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go dependencies " ON) option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) @@ -109,7 +109,7 @@ if (WITH_C_API AND WITH_PYTHON) endif() if (WITH_C_API) - set(WITH_FLUID OFF CACHE STRING "Disable install fluid when compile the C_API" FORCE) + set(WITH_FLUID_ONLY OFF CACHE STRING "Disable install fluid when compile the C_API" FORCE) endif() if(MOBILE_INFERENCE) @@ -147,6 +147,7 @@ include(external/cares) include(external/grpc) include(external/snappy) # download snappy include(external/snappystream) +include(external/threadpool) include(cudnn) # set cudnn libraries, must before configure include(cupti) @@ -181,6 +182,11 @@ if(WITH_GPU) include(cuda) endif(WITH_GPU) +# TensorRT depends on GPU. +if (NOT WITH_GPU) + set(WITH_TENSORRT OFF) +endif() + if(WITH_AMD_GPU) find_package(HIP) include(hip) diff --git a/Dockerfile b/Dockerfile index fbec88c7966d6ea93495519843d6cda63f622661..9097bb657d2366997112ec7662762a93358aa647 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ # A image for building paddle binaries # Use cuda devel base image for both cpu and gpu environment -FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04 +FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04 MAINTAINER PaddlePaddle Authors ARG UBUNTU_MIRROR @@ -45,6 +45,13 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin # install glide RUN curl -s -q https://glide.sh/get | sh +# Install TensorRT +# The unnecessary files has been removed to make the library small. +RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \ + tar -xz -C /usr/local && \ + cp -rf /usr/local/TensorRT/include /usr && \ + cp -rf /usr/local/TensorRT/lib /usr + # git credential to skip password typing RUN git config --global credential.helper store @@ -57,7 +64,7 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8 # specify sphinx version as 1.5.6 and remove -U option for [pip install -U # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest # version(1.7.1 for now), which causes building documentation failed. -RUN pip install --upgrade pip && \ +RUN pip install --upgrade pip==9.0.3 && \ pip install -U wheel && \ pip install -U docopt PyYAML sphinx==1.5.6 && \ pip install sphinx-rtd-theme==0.1.9 recommonmark diff --git a/benchmark/cluster/README.md b/benchmark/cluster/README.md index b619613ea7a5b6e940ec735314e8e47338b2c600..64816098a524f064ec12474a736cd4c721227a70 100644 --- a/benchmark/cluster/README.md +++ b/benchmark/cluster/README.md @@ -36,11 +36,41 @@ - Trainer Count: 100 - Metrics: mini-batch / sec -| Batch Size | 32 | 64 | 128 | 256 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | - | - | - | - | -| PaddlePaddle v2 | - | - | - | - | -| TensorFlow | - | - | - | - | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Batch Size | 32 | 64 | 128 | 256 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | - | - | - | - |
| PaddlePaddle v2 | - | - | - | - |
| TensorFlow | - | - | - | - |
### Measure the Performance for Different PServer Count @@ -48,11 +78,41 @@ - Batch Size: 64 - Metrics: mini-batch / sec -| PServer Count | 10 | 20 | 40 | 60 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | - | - | - | - | -| PaddlePaddle v2 | - | - | - | - | -| TensorFlow | - | - | - | - | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| PServer Count | 10 | 20 | 40 | 60 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | - | - | - | - |
| PaddlePaddle v2 | - | - | - | - |
| TensorFlow | - | - | - | - |
### Measure Parallel Efficiency By Increasing Trainer Count @@ -67,11 +127,69 @@ The parallel efficiency is: $E = \div(S, N)$ -| Trainer Counter | 1 | 10 | 20 | 30 | 40 | 50 | 60 | 70 | 80 | 90 | 100 | -| -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | - | - | - | - | - | - | - | - | - | - | - | -| PaddlePaddle v2 | - | - | - | - | - | - | - | - | - | - | - | - | -| TensorFlow | - | - | - | - | - | - | - | - | - | - | - | - | - | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Trainer Counter | 1 | 10 | 20 | 30 | 40 | 50 | 60 | 70 | 80 | 90 | 100 |
| -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | - | - | - | - | - | - | - | - | - | - | - |
| PaddlePaddle v2 | - | - | - | - | - | - | - | - | - | - | - |
| TensorFlow | - | - | - | - | - | - | - | - | - | - | - |
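A worked example of the efficiency formula above, with purely illustrative numbers since the table is not yet filled in: if one pass over the data takes $T_1 = 1000$s with a single trainer and $T_N = 60$s with $N = 20$ trainers, the speedup is $S = T_1 / T_N \approx 16.7$ and the parallel efficiency is $E = S / N \approx 0.83$, i.e. roughly 83%.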
+ ## Reproduce the benchmark diff --git a/benchmark/cluster/vgg16/README.md b/benchmark/cluster/vgg16/README.md index cd681a1a282d9a26eac1c267bfa26967f8c3c9fd..d56a912b9b03986e32693363f82df05a34b779e9 100644 --- a/benchmark/cluster/vgg16/README.md +++ b/benchmark/cluster/vgg16/README.md @@ -16,11 +16,41 @@ Setting environment variable: `MKL_NUM_THREADS=1`. - Metrics: samples / sec -| Batch Size | 32 | 64 | 128 | 256 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | 15.44 | 16.32 | 16.74 | 16.79 | -| PaddlePaddle v2 | 15.97 | 17.04 | 17.60 | 17.83 | -| TensorFlow | 9.09 | 9.10 | 9.24 | 8.66 | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Batch Size | 32 | 64 | 128 | 256 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | 15.44 | 16.32 | 16.74 | 16.79 |
| PaddlePaddle v2 | 15.97 | 17.04 | 17.60 | 17.83 |
| TensorFlow | 9.09 | 9.10 | 9.24 | 8.66 |
+ ### Different Batch Size @@ -28,12 +58,40 @@ Setting environment variable: `MKL_NUM_THREADS=1`. - Trainer Count: 20 - Metrics: samples / sec -| Batch Size | 32 | 64 | 128 | 256 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | 190.20 | 222.15 | 247.40 | 258.18 | -| PaddlePaddle v2 | 170.96 | 233.71 | 256.14 | 329.23 | -| TensorFlow | - | - | - | - | - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Batch Size | 32 | 64 | 128 | 256 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | 190.20 | 222.15 | 247.40 | 258.18 |
| PaddlePaddle v2 | 170.96 | 233.71 | 256.14 | 329.23 |
| TensorFlow | - | - | - | - |
### Accelerate Rate @@ -41,11 +99,41 @@ Setting environment variable: `MKL_NUM_THREADS=1`. - Batch Size: 128 - Metrics: samples / sec -| Trainer Count | 20 | 40 | 80 | 100 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid | 263.29 (78.64%) | 518.80 (77.47%) | 836.26 (62.44%) | 1019.29 (60.89%) | -| PaddlePaddle v2 (need more tests) | 326.85 (92.85%) | 534.58 (75.93%) | 853.30 (60.60%) | 1041.99 (59.20%) | -| TensorFlow | - | - | - | - | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Trainer Count | 20 | 40 | 80 | 100 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid | 263.29 (78.64%) | 518.80 (77.47%) | 836.26 (62.44%) | 1019.29 (60.89%) |
| PaddlePaddle v2 (need more tests) | 326.85 (92.85%) | 534.58 (75.93%) | 853.30 (60.60%) | 1041.99 (59.20%) |
| TensorFlow | - | - | - | - |
+ ### Different Pserver Count @@ -53,11 +141,41 @@ Setting environment variable: `MKL_NUM_THREADS=1`. - Batch Size: 128 - Metrics: samples/ sec -| PServer Count | 3 | 6 |10 | 20 | -| -- | -- | -- | -- | -- | -| PaddlePaddle Fluid(should fix in next PR) | 589.1 | 592.6 | 656.4 | 655.8 | -| PaddlePaddle v2 | 593.4 | 791.3 | 729.7 | 821.7 | -| TensorFlow | - | - | - | - | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| PServer Count | 3 | 6 | 10 | 20 |
| -- | -- | -- | -- | -- |
| PaddlePaddle Fluid (should fix in next PR) | 589.1 | 592.6 | 656.4 | 655.8 |
| PaddlePaddle v2 (need more tests) | 593.4 | 791.3 | 729.7 | 821.7 |
| TensorFlow | - | - | - | - |
+ *The performance gap between Fuild and v2 comes from the network interference.* diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/machine_translation.py new file mode 100644 index 0000000000000000000000000000000000000000..d7a421c10979c3b9d6865a8c0b99a6410e0f46a8 --- /dev/null +++ b/benchmark/fluid/machine_translation.py @@ -0,0 +1,379 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""seq2seq model for fluid.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import argparse +import time +import distutils.util + +import paddle.v2 as paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +from paddle.fluid.executor import Executor + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--embedding_dim", + type=int, + default=512, + help="The dimension of embedding table. (default: %(default)d)") +parser.add_argument( + "--encoder_size", + type=int, + default=512, + help="The size of encoder bi-rnn unit. (default: %(default)d)") +parser.add_argument( + "--decoder_size", + type=int, + default=512, + help="The size of decoder rnn unit. (default: %(default)d)") +parser.add_argument( + "--batch_size", + type=int, + default=16, + help="The sequence number of a mini-batch data. (default: %(default)d)") +parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test') +parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') +parser.add_argument( + "--dict_size", + type=int, + default=30000, + help="The dictionary capacity. Dictionaries of source sequence and " + "target dictionary have same capacity. (default: %(default)d)") +parser.add_argument( + "--pass_num", + type=int, + default=2, + help="The pass number to train. (default: %(default)d)") +parser.add_argument( + "--learning_rate", + type=float, + default=0.0002, + help="Learning rate used to train the model. (default: %(default)f)") +parser.add_argument( + "--infer_only", action='store_true', help="If set, run forward only.") +parser.add_argument( + "--beam_size", + type=int, + default=3, + help="The width for beam searching. (default: %(default)d)") +parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help="The device type.") +parser.add_argument( + "--max_length", + type=int, + default=250, + help="The maximum length of sequence when doing generation. 
" + "(default: %(default)d)") +parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') + + +def lstm_step(x_t, hidden_t_prev, cell_t_prev, size): + def linear(inputs): + return fluid.layers.fc(input=inputs, size=size, bias_attr=True) + + forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t])) + input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t])) + output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t])) + cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t])) + + cell_t = fluid.layers.sums(input=[ + fluid.layers.elementwise_mul( + x=forget_gate, y=cell_t_prev), fluid.layers.elementwise_mul( + x=input_gate, y=cell_tilde) + ]) + + hidden_t = fluid.layers.elementwise_mul( + x=output_gate, y=fluid.layers.tanh(x=cell_t)) + + return hidden_t, cell_t + + +def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim, + target_dict_dim, is_generating, beam_size, max_length): + """Construct a seq2seq network.""" + + def bi_lstm_encoder(input_seq, gate_size): + # Linear transformation part for input gate, output gate, forget gate + # and cell activation vectors need be done outside of dynamic_lstm. + # So the output size is 4 times of gate_size. + input_forward_proj = fluid.layers.fc(input=input_seq, + size=gate_size * 4, + act=None, + bias_attr=False) + forward, _ = fluid.layers.dynamic_lstm( + input=input_forward_proj, size=gate_size * 4, use_peepholes=False) + input_reversed_proj = fluid.layers.fc(input=input_seq, + size=gate_size * 4, + act=None, + bias_attr=False) + reversed, _ = fluid.layers.dynamic_lstm( + input=input_reversed_proj, + size=gate_size * 4, + is_reverse=True, + use_peepholes=False) + return forward, reversed + + src_word_idx = fluid.layers.data( + name='source_sequence', shape=[1], dtype='int64', lod_level=1) + + src_embedding = fluid.layers.embedding( + input=src_word_idx, + size=[source_dict_dim, embedding_dim], + dtype='float32') + + src_forward, src_reversed = bi_lstm_encoder( + input_seq=src_embedding, gate_size=encoder_size) + + encoded_vector = fluid.layers.concat( + input=[src_forward, src_reversed], axis=1) + + encoded_proj = fluid.layers.fc(input=encoded_vector, + size=decoder_size, + bias_attr=False) + + backward_first = fluid.layers.sequence_pool( + input=src_reversed, pool_type='first') + + decoder_boot = fluid.layers.fc(input=backward_first, + size=decoder_size, + bias_attr=False, + act='tanh') + + def lstm_decoder_with_attention(target_embedding, encoder_vec, encoder_proj, + decoder_boot, decoder_size): + def simple_attention(encoder_vec, encoder_proj, decoder_state): + decoder_state_proj = fluid.layers.fc(input=decoder_state, + size=decoder_size, + bias_attr=False) + decoder_state_expand = fluid.layers.sequence_expand( + x=decoder_state_proj, y=encoder_proj) + concated = fluid.layers.concat( + input=[encoder_proj, decoder_state_expand], axis=1) + attention_weights = fluid.layers.fc(input=concated, + size=1, + act='tanh', + bias_attr=False) + attention_weights = fluid.layers.sequence_softmax( + input=attention_weights) + weigths_reshape = fluid.layers.reshape( + x=attention_weights, shape=[-1]) + scaled = fluid.layers.elementwise_mul( + x=encoder_vec, y=weigths_reshape, axis=0) + context = fluid.layers.sequence_pool(input=scaled, pool_type='sum') + return context + + rnn = fluid.layers.DynamicRNN() + + cell_init = fluid.layers.fill_constant_batch_size_like( + input=decoder_boot, + value=0.0, + shape=[-1, decoder_size], + dtype='float32') + 
cell_init.stop_gradient = False + + with rnn.block(): + current_word = rnn.step_input(target_embedding) + encoder_vec = rnn.static_input(encoder_vec) + encoder_proj = rnn.static_input(encoder_proj) + hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True) + cell_mem = rnn.memory(init=cell_init) + context = simple_attention(encoder_vec, encoder_proj, hidden_mem) + decoder_inputs = fluid.layers.concat( + input=[context, current_word], axis=1) + h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size) + rnn.update_memory(hidden_mem, h) + rnn.update_memory(cell_mem, c) + out = fluid.layers.fc(input=h, + size=target_dict_dim, + bias_attr=True, + act='softmax') + rnn.output(out) + return rnn() + + if not is_generating: + trg_word_idx = fluid.layers.data( + name='target_sequence', shape=[1], dtype='int64', lod_level=1) + + trg_embedding = fluid.layers.embedding( + input=trg_word_idx, + size=[target_dict_dim, embedding_dim], + dtype='float32') + + prediction = lstm_decoder_with_attention(trg_embedding, encoded_vector, + encoded_proj, decoder_boot, + decoder_size) + label = fluid.layers.data( + name='label_sequence', shape=[1], dtype='int64', lod_level=1) + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + + feeding_list = ["source_sequence", "target_sequence", "label_sequence"] + + return avg_cost, feeding_list + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + lod_t = core.LoDTensor() + lod_t.set(flattened_data, place) + lod_t.set_lod([lod]) + return lod_t, lod[-1] + + +def lodtensor_to_ndarray(lod_tensor): + dims = lod_tensor.get_dims() + ndarray = np.zeros(shape=dims).astype('float32') + for i in xrange(np.product(dims)): + ndarray.ravel()[i] = lod_tensor.get_float_element(i) + return ndarray + + +def train(): + avg_cost, feeding_list = seq_to_seq_net( + args.embedding_dim, + args.encoder_size, + args.decoder_size, + args.dict_size, + args.dict_size, + False, + beam_size=args.beam_size, + max_length=args.max_length) + + # clone from default main program + inference_program = fluid.default_main_program().clone() + + optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) + optimizer.minimize(avg_cost) + + fluid.memory_optimize(fluid.default_main_program()) + + train_batch_generator = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(args.dict_size), buf_size=1000), + batch_size=args.batch_size) + + test_batch_generator = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.test(args.dict_size), buf_size=1000), + batch_size=args.batch_size) + + place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) + exe = Executor(place) + exe.run(framework.default_startup_program()) + + def do_validation(): + total_loss = 0.0 + count = 0 + for batch_id, data in enumerate(test_batch_generator()): + src_seq = to_lodtensor(map(lambda x: x[0], data), place)[0] + trg_seq = to_lodtensor(map(lambda x: x[1], data), place)[0] + lbl_seq = to_lodtensor(map(lambda x: x[2], data), place)[0] + + fetch_outs = exe.run(inference_program, + feed={ + feeding_list[0]: src_seq, + feeding_list[1]: trg_seq, + feeding_list[2]: lbl_seq + }, + fetch_list=[avg_cost], + return_numpy=False) + + total_loss += lodtensor_to_ndarray(fetch_outs[0])[0] + count += 1 + + return 
total_loss / count + + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in xrange(args.pass_num): + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_batch_generator()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + src_seq, word_num = to_lodtensor(map(lambda x: x[0], data), place) + num_samples += word_num + trg_seq, word_num = to_lodtensor(map(lambda x: x[1], data), place) + num_samples += word_num + lbl_seq, _ = to_lodtensor(map(lambda x: x[2], data), place) + + fetch_outs = exe.run(framework.default_main_program(), + feed={ + feeding_list[0]: src_seq, + feeding_list[1]: trg_seq, + feeding_list[2]: lbl_seq + }, + fetch_list=[avg_cost]) + + iters += 1 + loss = np.array(fetch_outs[0]) + print( + "Pass = %d, Iter = %d, Loss = %f" % (pass_id, iters, loss) + ) # The accuracy is the accumulation of batches, but not the current batch. + + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + test_loss = do_validation() + exit(0) + + +def infer(): + pass + + +def print_arguments(args): + print('----------- seq2seq Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + args = parser.parse_args() + print_arguments(args) + if args.infer_only: + infer() + else: + train() diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..dc10ac2ec195acc9a5693718141ddb32417dfb71 --- /dev/null +++ b/benchmark/fluid/mnist.py @@ -0,0 +1,224 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import argparse +import time + +import paddle.v2 as paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler + +SEED = 1 +DTYPE = "float32" + +# random seed must set before configuring the network. 
+# fluid.default_startup_program().random_seed = SEED + + +def parse_args(): + parser = argparse.ArgumentParser("mnist model benchmark.") + parser.add_argument( + '--batch_size', type=int, default=128, help='The minibatch size.') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) + parser.add_argument( + '--iterations', type=int, default=35, help='The number of minibatches.') + parser.add_argument( + '--pass_num', type=int, default=5, help='The number of passes.') + parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help='The device type.') + parser.add_argument( + '--infer_only', action='store_true', help='If set, run forward only.') + parser.add_argument( + '--use_cprof', action='store_true', help='If set, use cProfile.') + parser.add_argument( + '--use_nvprof', + action='store_true', + help='If set, use nvprof for CUDA.') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') + args = parser.parse_args() + return args + + +def cnn_model(data): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=data, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + + # TODO(dzhwinter) : refine the initializer and random seed settting + SIZE = 10 + input_shape = conv_pool_2.shape + param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] + scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + + predict = fluid.layers.fc( + input=conv_pool_2, + size=SIZE, + act="softmax", + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale))) + return predict + + +def eval_test(exe, batch_acc, batch_size_tensor, inference_program): + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=args.batch_size) + test_pass_acc = fluid.average.WeightedAverage() + for batch_id, data in enumerate(test_reader()): + img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]), + data)).astype(DTYPE) + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([len(y_data), 1]) + + acc, weight = exe.run(inference_program, + feed={"pixel": img_data, + "label": y_data}, + fetch_list=[batch_acc, batch_size_tensor]) + test_pass_acc.add(value=acc, weight=weight) + pass_acc = test_pass_acc.eval() + return pass_acc + + +def run_benchmark(model, args): + if args.use_cprof: + pr = cProfile.Profile() + pr.enable() + start_time = time.time() + # Input data + images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Train program + predict = model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + # inference program + inference_program = fluid.default_main_program().clone() + + # Optimization + opt = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999) + opt.minimize(avg_cost) + + fluid.memory_optimize(fluid.default_main_program()) + + # Initialize executor + place = fluid.CPUPlace() if args.device == 
'CPU' else fluid.CUDAPlace(0) + exe = fluid.Executor(place) + + # Parameter initialization + exe.run(fluid.default_startup_program()) + + # Reader + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=args.batch_size) + + accuracy = fluid.metrics.Accuracy() + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in range(args.pass_num): + accuracy.reset() + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + img_data = np.array( + map(lambda x: x[0].reshape([1, 28, 28]), data)).astype(DTYPE) + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([len(y_data), 1]) + + outs = exe.run( + fluid.default_main_program(), + feed={"pixel": img_data, + "label": y_data}, + fetch_list=[avg_cost, batch_acc, batch_size_tensor] + ) # The accuracy is the accumulation of batches, but not the current batch. + accuracy.update(value=outs[1], weight=outs[2]) + iters += 1 + num_samples += len(y_data) + loss = np.array(outs[0]) + acc = np.array(outs[1]) + train_losses.append(loss) + train_accs.append(acc) + print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % + (pass_id, iters, loss, acc)) + + print("Pass: %d, Loss: %f, Train Accuray: %f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + test_avg_acc = eval_test(exe, batch_acc, batch_size_tensor, + inference_program) + exit(0) + + +def print_arguments(args): + vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and + vars(args)['device'] == 'GPU') + print('----------- mnist Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + args = parse_args() + print_arguments(args) + if args.use_nvprof and args.device == 'GPU': + with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: + run_benchmark(cnn_model, args) + else: + run_benchmark(cnn_model, args) diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1af5eaf6b46be47cb6b778cedcf53830c201ef39 --- /dev/null +++ b/benchmark/fluid/resnet.py @@ -0,0 +1,313 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import functools +import numpy as np +import time + +import cProfile, pstats, StringIO + +import paddle.v2 as paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.profiler as profiler + + +def parse_args(): + parser = argparse.ArgumentParser('Convolution model benchmark.') + parser.add_argument( + '--model', + type=str, + choices=['resnet_imagenet', 'resnet_cifar10'], + default='resnet_imagenet', + help='The model architecture.') + parser.add_argument( + '--batch_size', type=int, default=32, help='The minibatch size.') + parser.add_argument( + '--use_fake_data', + action='store_true', + help='use real data or fake data') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) + parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') + parser.add_argument( + '--pass_num', type=int, default=100, help='The number of passes.') + parser.add_argument( + '--data_format', + type=str, + default='NCHW', + choices=['NCHW', 'NHWC'], + help='The data data_format, now only support NCHW.') + parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help='The device type.') + parser.add_argument( + '--data_set', + type=str, + default='flowers', + choices=['cifar10', 'flowers'], + help='Optional dataset for benchmark.') + parser.add_argument( + '--infer_only', action='store_true', help='If set, run forward only.') + parser.add_argument( + '--use_cprof', action='store_true', help='If set, use cProfile.') + parser.add_argument( + '--use_nvprof', + action='store_true', + help='If set, use nvprof for CUDA.') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') + args = parser.parse_args() + return args + + +def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): + conv1 = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=False) + return fluid.layers.batch_norm(input=conv1, act=act) + + +def shortcut(input, ch_out, stride): + ch_in = input.shape[1] if args.data_format == 'NCHW' else input.shape[-1] + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + +def basicblock(input, ch_out, stride): + short = shortcut(input, ch_out, stride) + conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) + conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, act=None) + return fluid.layers.elementwise_add(x=short, y=conv2, act='relu') + + +def bottleneck(input, ch_out, stride): + short = shortcut(input, ch_out * 4, stride) + conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) + conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) + conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, act=None) + return fluid.layers.elementwise_add(x=short, y=conv3, act='relu') + + +def layer_warp(block_func, input, ch_out, count, stride): + res_out = block_func(input, ch_out, stride) + for i in range(1, count): + res_out = block_func(res_out, ch_out, 1) + return res_out + + +def resnet_imagenet(input, class_dim, depth=50, data_format='NCHW'): + + cfg = { + 18: ([2, 2, 2, 1], basicblock), + 34: ([3, 4, 6, 3], basicblock), + 50: ([3, 4, 6, 3], bottleneck), + 101: ([3, 4, 23, 3], bottleneck), + 152: ([3, 8, 
36, 3], bottleneck) + } + stages, block_func = cfg[depth] + conv1 = conv_bn_layer(input, ch_out=64, filter_size=7, stride=2, padding=3) + pool1 = fluid.layers.pool2d( + input=conv1, pool_type='avg', pool_size=3, pool_stride=2) + res1 = layer_warp(block_func, pool1, 64, stages[0], 1) + res2 = layer_warp(block_func, res1, 128, stages[1], 2) + res3 = layer_warp(block_func, res2, 256, stages[2], 2) + res4 = layer_warp(block_func, res3, 512, stages[3], 2) + pool2 = fluid.layers.pool2d( + input=res4, + pool_size=7, + pool_type='avg', + pool_stride=1, + global_pooling=True) + out = fluid.layers.fc(input=pool2, size=class_dim, act='softmax') + return out + + +def resnet_cifar10(input, class_dim, depth=32, data_format='NCHW'): + assert (depth - 2) % 6 == 0 + + n = (depth - 2) // 6 + + conv1 = conv_bn_layer( + input=input, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, n, 1) + res2 = layer_warp(basicblock, res1, 32, n, 2) + res3 = layer_warp(basicblock, res2, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + out = fluid.layers.fc(input=pool, size=class_dim, act='softmax') + return out + + +def run_benchmark(model, args): + if args.use_cprof: + pr = cProfile.Profile() + pr.enable() + + if args.data_set == "cifar10": + class_dim = 10 + if args.data_format == 'NCHW': + dshape = [3, 32, 32] + else: + dshape = [32, 32, 3] + else: + class_dim = 102 + if args.data_format == 'NCHW': + dshape = [3, 224, 224] + else: + dshape = [224, 224, 3] + + input = fluid.layers.data(name='data', shape=dshape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + predict = model(input, class_dim) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc, batch_size_tensor]) + + optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) + opts = optimizer.minimize(avg_cost) + + fluid.memory_optimize(fluid.default_main_program()) + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.dataset.cifar.test10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), + batch_size=args.batch_size) + + def test(exe): + test_accuracy = fluid.average.WeightedAverage() + for batch_id, data in enumerate(test_reader()): + img_data = np.array(map(lambda x: x[0].reshape(dshape), + data)).astype("float32") + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([-1, 1]) + + acc, weight = exe.run(inference_program, + feed={"data": img_data, + "label": y_data}, + fetch_list=[batch_acc, batch_size_tensor]) + test_accuracy.add(value=acc, weight=weight) + + return test_accuracy.eval() + + place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + accuracy = fluid.average.WeightedAverage() + if args.use_fake_data: + data = train_reader().next() + image = np.array(map(lambda x: x[0].reshape(dshape), 
data)).astype( + 'float32') + label = np.array(map(lambda x: x[1], data)).astype('int64') + label = label.reshape([-1, 1]) + + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in range(args.pass_num): + accuracy.reset() + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + if not args.use_fake_data: + image = np.array(map(lambda x: x[0].reshape(dshape), + data)).astype('float32') + label = np.array(map(lambda x: x[1], data)).astype('int64') + label = label.reshape([-1, 1]) + loss, acc, weight = exe.run( + fluid.default_main_program(), + feed={'data': image, + 'label': label}, + fetch_list=[avg_cost, batch_acc, batch_size_tensor]) + iters += 1 + num_samples += len(label) + accuracy.add(value=acc, weight=weight) + train_losses.append(loss) + train_accs.append(acc) + print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % + (pass_id, iters, loss, acc)) + print("Pass: %d, Loss: %f, Train Accuray: %f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + pass_test_acc = test(exe) + exit(0) + + +def print_arguments(args): + vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and + vars(args)['device'] == 'GPU') + print('----------- resnet Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + model_map = { + 'resnet_imagenet': resnet_imagenet, + 'resnet_cifar10': resnet_cifar10 + } + args = parse_args() + print_arguments(args) + if args.data_format == 'NHWC': + raise ValueError('Only support NCHW data_format now.') + if args.use_nvprof and args.device == 'GPU': + with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: + run_benchmark(model_map[args.model], args) + else: + run_benchmark(model_map[args.model], args) diff --git a/benchmark/fluid/run.sh b/benchmark/fluid/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..f6dfd20bf2ee0b668b6d4238d4511253b2233035 --- /dev/null +++ b/benchmark/fluid/run.sh @@ -0,0 +1,105 @@ +#!/bin/bash +# This script benchmarking the PaddlePaddle Fluid on +# single thread single GPU. 
+ +#export FLAGS_fraction_of_gpu_memory_to_use=0.0 +export CUDNN_PATH=/paddle/cudnn_v5 + +# disable openmp and mkl parallel +#https://github.com/PaddlePaddle/Paddle/issues/7199 +export MKL_NUM_THREADS=1 +export OMP_NUM_THREADS=1 +ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs` +if [ $ht -eq 1 ]; then # HT is OFF + if [ -z "$KMP_AFFINITY" ]; then + export KMP_AFFINITY="granularity=fine,compact,0,0" + fi + if [ -z "$OMP_DYNAMIC" ]; then + export OMP_DYNAMIC="FALSE" + fi +else # HT is ON + if [ -z "$KMP_AFFINITY" ]; then + export KMP_AFFINITY="granularity=fine,compact,1,0" + fi +fi +# disable multi-gpu if have more than one +export CUDA_VISIBLE_DEVICES=0 +export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=$CUDNN_PATH:$LD_LIBRARY_PATH + +# only query the gpu used +nohup stdbuf -oL nvidia-smi \ + --id=${CUDA_VISIBLE_DEVICES} \ + --query-gpu=timestamp \ + --query-compute-apps=pid,process_name,used_memory \ + --format=csv \ + --filename=mem.log \ + -l 1 & +# mnist +# mnist gpu mnist 128 +FLAGS_benchmark=true stdbuf -oL python fluid/mnist.py \ + --device=GPU \ + --batch_size=128 \ + --skip_batch_num=5 \ + --iterations=500 \ + 2>&1 | tee -a mnist_gpu_128.log + +# vgg16 +# gpu cifar10 128 +FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ + --device=GPU \ + --batch_size=128 \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a vgg16_gpu_128.log + +# flowers gpu 128 +FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ + --device=GPU \ + --batch_size=32 \ + --data_set=flowers \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a vgg16_gpu_flowers_32.log + +# resnet50 +# resnet50 gpu cifar10 128 +FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ + --device=GPU \ + --batch_size=128 \ + --data_set=cifar10 \ + --model=resnet_cifar10 \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a resnet50_gpu_128.log + +# resnet50 gpu flowers 64 +FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ + --device=GPU \ + --batch_size=64 \ + --data_set=flowers \ + --model=resnet_imagenet \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a resnet50_gpu_flowers_64.log + +# lstm +# lstm gpu imdb 32 # tensorflow only support batch=32 +FLAGS_benchmark=true stdbuf -oL python fluid/stacked_dynamic_lstm.py \ + --device=GPU \ + --batch_size=32 \ + --skip_batch_num=5 \ + --iterations=30 \ + --hidden_dim=512 \ + --emb_dim=512 \ + --crop_size=1500 \ + 2>&1 | tee -a lstm_gpu_32.log + +# seq2seq +# seq2seq gpu wmb 128 +FLAGS_benchmark=true stdbuf -oL python fluid/machine_translation.py \ + --device=GPU \ + --batch_size=128 \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a lstm_gpu_128.log diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..5fcbdd64af9dc196c9d5b2b82ce4213478ea1418 --- /dev/null +++ b/benchmark/fluid/stacked_dynamic_lstm.py @@ -0,0 +1,236 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import cPickle +import os +import random +import time + +import numpy +import paddle.v2 as paddle +import paddle.v2.dataset.imdb as imdb +import paddle.fluid as fluid +from paddle.v2 import batch +import paddle.fluid.profiler as profiler + + +def parse_args(): + parser = argparse.ArgumentParser("Understand Sentiment by Dynamic RNN.") + parser.add_argument( + '--batch_size', + type=int, + default=32, + help='The sequence number of a batch data. (default: %(default)d)') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) + parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') + parser.add_argument( + '--emb_dim', + type=int, + default=512, + help='Dimension of embedding table. (default: %(default)d)') + parser.add_argument( + '--hidden_dim', + type=int, + default=512, + help='Hidden size of lstm unit. (default: %(default)d)') + parser.add_argument( + '--pass_num', + type=int, + default=100, + help='Epoch number to train. (default: %(default)d)') + parser.add_argument( + '--device', + type=str, + default='CPU', + choices=['CPU', 'GPU'], + help='The device type.') + parser.add_argument( + '--crop_size', + type=int, + default=int(os.environ.get('CROP_SIZE', '1500')), + help='The max sentence length of input. Since this model use plain RNN,' + ' Gradient could be explored if sentence is too long') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') + args = parser.parse_args() + return args + + +word_dict = imdb.word_dict() + + +def crop_sentence(reader, crop_size): + unk_value = word_dict[''] + + def __impl__(): + for item in reader(): + if len([x for x in item[0] if x != unk_value]) < crop_size: + yield item + + return __impl__ + + +def main(): + args = parse_args() + lstm_size = args.hidden_dim + + data = fluid.layers.data( + name="words", shape=[1], lod_level=1, dtype='int64') + sentence = fluid.layers.embedding( + input=data, size=[len(word_dict), args.emb_dim]) + + sentence = fluid.layers.fc(input=sentence, size=lstm_size, act='tanh') + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + word = rnn.step_input(sentence) + prev_hidden = rnn.memory(value=0.0, shape=[lstm_size]) + prev_cell = rnn.memory(value=0.0, shape=[lstm_size]) + + def gate_common( + ipt, + hidden, + size, ): + gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True) + gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False) + gate = fluid.layers.sums(input=[gate0, gate1]) + return gate + + forget_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + input_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + output_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + cell_gate = fluid.layers.tanh( + x=gate_common(word, prev_hidden, lstm_size)) + + cell = fluid.layers.sums(input=[ + fluid.layers.elementwise_mul( + x=forget_gate, y=prev_cell), fluid.layers.elementwise_mul( + x=input_gate, y=cell_gate) + ]) + + hidden = fluid.layers.elementwise_mul( + x=output_gate, y=fluid.layers.tanh(x=cell)) + + rnn.update_memory(prev_cell, cell) + rnn.update_memory(prev_hidden, hidden) + rnn.output(hidden) + + 
last = fluid.layers.sequence_pool(rnn(), 'last') + logit = fluid.layers.fc(input=last, size=2, act='softmax') + loss = fluid.layers.cross_entropy( + input=logit, + label=fluid.layers.data( + name='label', shape=[1], dtype='int64')) + loss = fluid.layers.mean(x=loss) + + # add acc + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy(input=logit, label=fluid.layers.data(name='label', \ + shape=[1], dtype='int64'), total=batch_size_tensor) + + inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc, batch_size_tensor]) + + adam = fluid.optimizer.Adam() + adam.minimize(loss) + + fluid.memory_optimize(fluid.default_main_program()) + + place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + train_reader = batch( + paddle.reader.shuffle( + crop_sentence(imdb.train(word_dict), args.crop_size), + buf_size=25000), + batch_size=args.batch_size) + + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in range(args.pass_num): + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + tensor_words = to_lodtensor([x[0] for x in data], place) + label = numpy.array([x[1] for x in data]).astype("int64") + label = label.reshape((-1, 1)) + loss_np, acc, weight = exe.run( + fluid.default_main_program(), + feed={"words": tensor_words, + "label": label}, + fetch_list=[loss, batch_acc, batch_size_tensor]) + iters += 1 + for x in data: + num_samples += len(x[0]) + print( + "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % + (pass_id, iters, loss_np, acc) + ) # The accuracy is the accumulation of batches, but not the current batch. + + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + exit(0) + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = numpy.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + res = fluid.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + +def print_arguments(args): + print('----------- lstm Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + args = parse_args() + print_arguments(args) + main() diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..9d990eff62ec368dc7033f55cc0862fa974a64e0 --- /dev/null +++ b/benchmark/fluid/vgg.py @@ -0,0 +1,224 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""VGG16 benchmark in Fluid""" +from __future__ import print_function + +import sys +import time +import numpy as np +import paddle.v2 as paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import argparse +import functools + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--batch_size', type=int, default=128, help="Batch size for training.") +parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test') +parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') +parser.add_argument( + '--learning_rate', + type=float, + default=1e-3, + help="Learning rate for training.") +parser.add_argument('--pass_num', type=int, default=50, help="No. of passes.") +parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help="The device type.") +parser.add_argument( + '--data_format', + type=str, + default='NCHW', + choices=['NCHW', 'NHWC'], + help='The data order, now only support NCHW.') +parser.add_argument( + '--data_set', + type=str, + default='cifar10', + choices=['cifar10', 'flowers'], + help='Optional dataset for benchmark.') +parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') +args = parser.parse_args() + + +def vgg16_bn_drop(input): + def conv_block(input, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=512, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=512, act=None) + return fc2 + + +def main(): + if args.data_set == "cifar10": + classdim = 10 + if args.data_format == 'NCHW': + data_shape = [3, 32, 32] + else: + data_shape = [32, 32, 3] + else: + classdim = 102 + if args.data_format == 'NCHW': + data_shape = [3, 224, 224] + else: + data_shape = [224, 224, 3] + + # Input data + images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Train program + net = vgg16_bn_drop(images) + predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + # inference program + 
inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc, batch_size_tensor]) + + # Optimization + optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) + opts = optimizer.minimize(avg_cost) + + fluid.memory_optimize(fluid.default_main_program()) + + # Initialize executor + place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) + exe = fluid.Executor(place) + + # Parameter initialization + exe.run(fluid.default_startup_program()) + + # data reader + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.dataset.cifar.test10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), + batch_size=args.batch_size) + + # test + def test(exe): + test_accuracy = fluid.average.WeightedAverage() + for batch_id, data in enumerate(test_reader()): + img_data = np.array(map(lambda x: x[0].reshape(data_shape), + data)).astype("float32") + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([-1, 1]) + + acc, weight = exe.run(inference_program, + feed={"pixel": img_data, + "label": y_data}, + fetch_list=[batch_acc, batch_size_tensor]) + test_accuracy.add(value=acc, weight=weight) + return test_accuracy.eval() + + iters, num_samples, start_time = 0, 0, time.time() + accuracy = fluid.average.WeightedAverage() + for pass_id in range(args.pass_num): + accuracy.reset() + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + img_data = np.array(map(lambda x: x[0].reshape(data_shape), + data)).astype("float32") + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([-1, 1]) + + loss, acc, weight = exe.run( + fluid.default_main_program(), + feed={"pixel": img_data, + "label": y_data}, + fetch_list=[avg_cost, batch_acc, batch_size_tensor]) + accuracy.add(value=acc, weight=weight) + iters += 1 + num_samples += len(y_data) + print( + "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % + (pass_id, iters, loss, acc) + ) # The accuracy is the accumulation of batches, but not the current batch. 
+ + # pass_train_acc = accuracy.eval() + train_losses.append(loss) + train_accs.append(acc) + print("Pass: %d, Loss: %f, Train Accuray: %f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + pass_test_acc = test(exe) + exit(0) + + +def print_arguments(): + print('----------- vgg Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == "__main__": + print_arguments() + main() diff --git a/benchmark/tensorflow/machine_translation.py b/benchmark/tensorflow/machine_translation.py new file mode 100644 index 0000000000000000000000000000000000000000..8f77dce98353af53803246be8dc61063836b7867 --- /dev/null +++ b/benchmark/tensorflow/machine_translation.py @@ -0,0 +1,626 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.python.framework import dtypes +from tensorflow.python.layers.core import Dense +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.framework import ops +from tensorflow.python.ops import rnn_cell_impl +from tensorflow.python.ops.rnn_cell_impl import RNNCell, BasicLSTMCell +from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple +from tensorflow.contrib.rnn.python.ops import core_rnn_cell +from tensorflow.python.ops import array_ops +from tensorflow.python.util import nest +import tensorflow.contrib.seq2seq as seq2seq +from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder +import numpy as np +import os +import argparse +import time + +import paddle.v2 as paddle + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--embedding_dim", + type=int, + default=512, + help="The dimension of embedding table. (default: %(default)d)") +parser.add_argument( + "--encoder_size", + type=int, + default=512, + help="The size of encoder bi-rnn unit. (default: %(default)d)") +parser.add_argument( + "--decoder_size", + type=int, + default=512, + help="The size of decoder rnn unit. (default: %(default)d)") +parser.add_argument( + "--batch_size", + type=int, + default=128, + help="The sequence number of a mini-batch data. (default: %(default)d)") +parser.add_argument( + "--dict_size", + type=int, + default=30000, + help="The dictionary capacity. Dictionaries of source sequence and " + "target dictionary have same capacity. (default: %(default)d)") +parser.add_argument( + "--max_time_steps", + type=int, + default=81, + help="Max number of time steps for sequence. 
(default: %(default)d)") +parser.add_argument( + "--pass_num", + type=int, + default=10, + help="The pass number to train. (default: %(default)d)") +parser.add_argument( + "--learning_rate", + type=float, + default=0.0002, + help="Learning rate used to train the model. (default: %(default)f)") +parser.add_argument( + "--infer_only", action='store_true', help="If set, run forward only.") +parser.add_argument( + "--beam_size", + type=int, + default=3, + help="The width for beam searching. (default: %(default)d)") +parser.add_argument( + "--max_generation_length", + type=int, + default=250, + help="The maximum length of sequence when doing generation. " + "(default: %(default)d)") +parser.add_argument( + "--save_freq", + type=int, + default=500, + help="Save model checkpoint every this interation. (default: %(default)d)") +parser.add_argument( + "--model_dir", + type=str, + default='./checkpoint', + help="Path to save model checkpoints. (default: %(default)d)") + +_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name + +START_TOKEN_IDX = 0 +END_TOKEN_IDX = 1 + + +class LSTMCellWithSimpleAttention(RNNCell): + """Add attention mechanism to BasicLSTMCell. + This class is a wrapper based on tensorflow's `BasicLSTMCell`. + """ + + def __init__(self, + num_units, + encoder_vector, + encoder_proj, + source_sequence_length, + forget_bias=1.0, + state_is_tuple=True, + activation=None, + reuse=None): + super(LSTMCellWithSimpleAttention, self).__init__(_reuse=reuse) + if not state_is_tuple: + logging.warn("%s: Using a concatenated state is slower and will " + "soon be deprecated. Use state_is_tuple=True.", self) + self._num_units = num_units + # set padding part to 0 + self._encoder_vector = self._reset_padding(encoder_vector, + source_sequence_length) + self._encoder_proj = self._reset_padding(encoder_proj, + source_sequence_length) + self._forget_bias = forget_bias + self._state_is_tuple = state_is_tuple + self._activation = activation or math_ops.tanh + self._linear = None + + @property + def state_size(self): + return (LSTMStateTuple(self._num_units, self._num_units) \ + if self._state_is_tuple else 2 * self._num_units) + + @property + def output_size(self): + return self._num_units + + def zero_state(self, batch_size, dtype): + state_size = self.state_size + if hasattr(self, "_last_zero_state"): + (last_state_size, last_batch_size, last_dtype, + last_output) = getattr(self, "_last_zero_state") + if (last_batch_size == batch_size and last_dtype == dtype and + last_state_size == state_size): + return last_output + with ops.name_scope( + type(self).__name__ + "ZeroState", values=[batch_size]): + output = _zero_state_tensors(state_size, batch_size, dtype) + self._last_zero_state = (state_size, batch_size, dtype, output) + return output + + def call(self, inputs, state): + sigmoid = math_ops.sigmoid + # Parameters of gates are concatenated into one multiply for efficiency. 
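+ # Roughly, the code below computes (activation defaults to tanh):
+ #   context = simple_attention(encoder_vec, encoder_proj, h)
+ #   i, j, f, o = split(linear([inputs, context, h]), 4, axis=1)
+ #   new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * activation(j)
+ #   new_h = activation(new_c) * sigmoid(o)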
+ if self._state_is_tuple: + c, h = state + else: + c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1) + + # get context from encoder outputs + context = self._simple_attention(self._encoder_vector, + self._encoder_proj, h) + + if self._linear is None: + self._linear = _Linear([inputs, context, h], 4 * self._num_units, + True) + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + i, j, f, o = array_ops.split( + value=self._linear([inputs, context, h]), + num_or_size_splits=4, + axis=1) + + new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * + self._activation(j)) + new_h = self._activation(new_c) * sigmoid(o) + + if self._state_is_tuple: + new_state = LSTMStateTuple(new_c, new_h) + else: + new_state = array_ops.concat([new_c, new_h], 1) + return new_h, new_state + + def _simple_attention(self, encoder_vec, encoder_proj, decoder_state): + """Implement the attention function. + The implementation has the same logic to the fluid decoder. + """ + decoder_state_proj = tf.contrib.layers.fully_connected( + inputs=decoder_state, + num_outputs=self._num_units, + activation_fn=None, + biases_initializer=None) + decoder_state_expand = tf.tile( + tf.expand_dims( + input=decoder_state_proj, axis=1), + [1, tf.shape(encoder_proj)[1], 1]) + concated = tf.concat([decoder_state_expand, encoder_proj], axis=2) + # need reduce the first dimension + attention_weights = tf.contrib.layers.fully_connected( + inputs=tf.reshape( + concated, shape=[-1, self._num_units * 2]), + num_outputs=1, + activation_fn=tf.nn.tanh, + biases_initializer=None) + attention_weights_reshaped = tf.reshape( + attention_weights, shape=[tf.shape(encoder_vec)[0], -1, 1]) + # normalize the attention weights using softmax + attention_weights_normed = tf.nn.softmax( + attention_weights_reshaped, dim=1) + scaled = tf.multiply(attention_weights_normed, encoder_vec) + context = tf.reduce_sum(scaled, axis=1) + return context + + def _reset_padding(self, + memory, + memory_sequence_length, + check_inner_dims_defined=True): + """Reset the padding part for encoder inputs. + This funtion comes from tensorflow's `_prepare_memory` function. 
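+ For example, with memory_sequence_length = [3, 1] and a maximum time
+ dimension of 4, the mask built below is [[1, 1, 1, 0], [1, 0, 0, 0]],
+ and the padded time steps of `memory` are zeroed out.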
+ """ + memory = nest.map_structure( + lambda m: ops.convert_to_tensor(m, name="memory"), memory) + if memory_sequence_length is not None: + memory_sequence_length = ops.convert_to_tensor( + memory_sequence_length, name="memory_sequence_length") + if check_inner_dims_defined: + + def _check_dims(m): + if not m.get_shape()[2:].is_fully_defined(): + raise ValueError( + "Expected memory %s to have fully defined inner dims, " + "but saw shape: %s" % (m.name, m.get_shape())) + + nest.map_structure(_check_dims, memory) + if memory_sequence_length is None: + seq_len_mask = None + else: + seq_len_mask = array_ops.sequence_mask( + memory_sequence_length, + maxlen=array_ops.shape(nest.flatten(memory)[0])[1], + dtype=nest.flatten(memory)[0].dtype) + seq_len_batch_size = (memory_sequence_length.shape[0].value or + array_ops.shape(memory_sequence_length)[0]) + + def _maybe_mask(m, seq_len_mask): + rank = m.get_shape().ndims + rank = rank if rank is not None else array_ops.rank(m) + extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32) + m_batch_size = m.shape[0].value or array_ops.shape(m)[0] + if memory_sequence_length is not None: + message = ("memory_sequence_length and memory tensor " + "batch sizes do not match.") + with ops.control_dependencies([ + check_ops.assert_equal( + seq_len_batch_size, m_batch_size, message=message) + ]): + seq_len_mask = array_ops.reshape( + seq_len_mask, + array_ops.concat( + (array_ops.shape(seq_len_mask), extra_ones), 0)) + return m * seq_len_mask + else: + return m + + return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), + memory) + + +def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim, + target_dict_dim, is_generating, beam_size, + max_generation_length): + src_word_idx = tf.placeholder(tf.int32, shape=[None, None]) + src_sequence_length = tf.placeholder(tf.int32, shape=[None, ]) + + src_embedding_weights = tf.get_variable("source_word_embeddings", + [source_dict_dim, embedding_dim]) + src_embedding = tf.nn.embedding_lookup(src_embedding_weights, src_word_idx) + + src_forward_cell = tf.nn.rnn_cell.BasicLSTMCell(encoder_size) + src_reversed_cell = tf.nn.rnn_cell.BasicLSTMCell(encoder_size) + # no peephole + encoder_outputs, _ = tf.nn.bidirectional_dynamic_rnn( + cell_fw=src_forward_cell, + cell_bw=src_reversed_cell, + inputs=src_embedding, + sequence_length=src_sequence_length, + dtype=tf.float32) + + # concat the forward outputs and backward outputs + encoded_vec = tf.concat(encoder_outputs, axis=2) + + # project the encoder outputs to size of decoder lstm + encoded_proj = tf.contrib.layers.fully_connected( + inputs=tf.reshape( + encoded_vec, shape=[-1, embedding_dim * 2]), + num_outputs=decoder_size, + activation_fn=None, + biases_initializer=None) + encoded_proj_reshape = tf.reshape( + encoded_proj, shape=[-1, tf.shape(encoded_vec)[1], decoder_size]) + + # get init state for decoder lstm's H + backword_first = tf.slice(encoder_outputs[1], [0, 0, 0], [-1, 1, -1]) + decoder_boot = tf.contrib.layers.fully_connected( + inputs=tf.reshape( + backword_first, shape=[-1, embedding_dim]), + num_outputs=decoder_size, + activation_fn=tf.nn.tanh, + biases_initializer=None) + + # prepare the initial state for decoder lstm + cell_init = tf.zeros(tf.shape(decoder_boot), tf.float32) + initial_state = LSTMStateTuple(cell_init, decoder_boot) + + # create decoder lstm cell + decoder_cell = LSTMCellWithSimpleAttention( + decoder_size, + encoded_vec + if not is_generating else seq2seq.tile_batch(encoded_vec, beam_size), + encoded_proj_reshape 
if not is_generating else + seq2seq.tile_batch(encoded_proj_reshape, beam_size), + src_sequence_length if not is_generating else + seq2seq.tile_batch(src_sequence_length, beam_size), + forget_bias=0.0) + + output_layer = Dense(target_dict_dim, name='output_projection') + + if not is_generating: + trg_word_idx = tf.placeholder(tf.int32, shape=[None, None]) + trg_sequence_length = tf.placeholder(tf.int32, shape=[None, ]) + trg_embedding_weights = tf.get_variable( + "target_word_embeddings", [target_dict_dim, embedding_dim]) + trg_embedding = tf.nn.embedding_lookup(trg_embedding_weights, + trg_word_idx) + + training_helper = seq2seq.TrainingHelper( + inputs=trg_embedding, + sequence_length=trg_sequence_length, + time_major=False, + name='training_helper') + + training_decoder = seq2seq.BasicDecoder( + cell=decoder_cell, + helper=training_helper, + initial_state=initial_state, + output_layer=output_layer) + + # get the max length of target sequence + max_decoder_length = tf.reduce_max(trg_sequence_length) + + decoder_outputs_train, _, _ = seq2seq.dynamic_decode( + decoder=training_decoder, + output_time_major=False, + impute_finished=True, + maximum_iterations=max_decoder_length) + + decoder_logits_train = tf.identity(decoder_outputs_train.rnn_output) + decoder_pred_train = tf.argmax( + decoder_logits_train, axis=-1, name='decoder_pred_train') + masks = tf.sequence_mask( + lengths=trg_sequence_length, + maxlen=max_decoder_length, + dtype=tf.float32, + name='masks') + + # place holder of label sequence + lbl_word_idx = tf.placeholder(tf.int32, shape=[None, None]) + + # compute the loss + loss = seq2seq.sequence_loss( + logits=decoder_logits_train, + targets=lbl_word_idx, + weights=masks, + average_across_timesteps=True, + average_across_batch=True) + + # return feeding list and loss operator + return { + 'src_word_idx': src_word_idx, + 'src_sequence_length': src_sequence_length, + 'trg_word_idx': trg_word_idx, + 'trg_sequence_length': trg_sequence_length, + 'lbl_word_idx': lbl_word_idx + }, loss + else: + start_tokens = tf.ones([tf.shape(src_word_idx)[0], ], + tf.int32) * START_TOKEN_IDX + # share the same embedding weights with target word + trg_embedding_weights = tf.get_variable( + "target_word_embeddings", [target_dict_dim, embedding_dim]) + + inference_decoder = beam_search_decoder.BeamSearchDecoder( + cell=decoder_cell, + embedding=lambda tokens: tf.nn.embedding_lookup(trg_embedding_weights, tokens), + start_tokens=start_tokens, + end_token=END_TOKEN_IDX, + initial_state=tf.nn.rnn_cell.LSTMStateTuple( + tf.contrib.seq2seq.tile_batch(initial_state[0], beam_size), + tf.contrib.seq2seq.tile_batch(initial_state[1], beam_size)), + beam_width=beam_size, + output_layer=output_layer) + + decoder_outputs_decode, _, _ = seq2seq.dynamic_decode( + decoder=inference_decoder, + output_time_major=False, + #impute_finished=True,# error occurs + maximum_iterations=max_generation_length) + + predicted_ids = decoder_outputs_decode.predicted_ids + + return { + 'src_word_idx': src_word_idx, + 'src_sequence_length': src_sequence_length + }, predicted_ids + + +def print_arguments(args): + print('----------- Configuration Arguments -----------') + for arg, value in vars(args).iteritems(): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +def padding_data(data, padding_size, value): + data = data + [value] * padding_size + return data[:padding_size] + + +def save(sess, path, var_list=None, global_step=None): + saver = tf.train.Saver(var_list) + save_path = 
saver.save(sess, save_path=path, global_step=global_step) + print('Model save at %s' % save_path) + + +def restore(sess, path, var_list=None): + # var_list = None returns the list of all saveable variables + saver = tf.train.Saver(var_list) + saver.restore(sess, save_path=path) + print('model restored from %s' % path) + + +def adapt_batch_data(data): + src_seq = map(lambda x: x[0], data) + trg_seq = map(lambda x: x[1], data) + lbl_seq = map(lambda x: x[2], data) + + src_sequence_length = np.array( + [len(seq) for seq in src_seq]).astype('int32') + src_seq_maxlen = np.max(src_sequence_length) + + trg_sequence_length = np.array( + [len(seq) for seq in trg_seq]).astype('int32') + trg_seq_maxlen = np.max(trg_sequence_length) + + src_seq = np.array( + [padding_data(seq, src_seq_maxlen, END_TOKEN_IDX) + for seq in src_seq]).astype('int32') + + trg_seq = np.array( + [padding_data(seq, trg_seq_maxlen, END_TOKEN_IDX) + for seq in trg_seq]).astype('int32') + + lbl_seq = np.array( + [padding_data(seq, trg_seq_maxlen, END_TOKEN_IDX) + for seq in lbl_seq]).astype('int32') + + return { + 'src_word_idx': src_seq, + 'src_sequence_length': src_sequence_length, + 'trg_word_idx': trg_seq, + 'trg_sequence_length': trg_sequence_length, + 'lbl_word_idx': lbl_seq + } + + +def train(): + feeding_dict, loss = seq_to_seq_net( + embedding_dim=args.embedding_dim, + encoder_size=args.encoder_size, + decoder_size=args.decoder_size, + source_dict_dim=args.dict_size, + target_dict_dim=args.dict_size, + is_generating=False, + beam_size=args.beam_size, + max_generation_length=args.max_generation_length) + + global_step = tf.Variable(0, trainable=False, name='global_step') + trainable_params = tf.trainable_variables() + optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) + + gradients = tf.gradients(loss, trainable_params) + # may clip the parameters + clip_gradients, _ = tf.clip_by_global_norm(gradients, 1.0) + + updates = optimizer.apply_gradients( + zip(gradients, trainable_params), global_step=global_step) + + src_dict, trg_dict = paddle.dataset.wmt14.get_dict(args.dict_size) + + train_batch_generator = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(args.dict_size), buf_size=1000), + batch_size=args.batch_size) + + test_batch_generator = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.test(args.dict_size), buf_size=1000), + batch_size=args.batch_size) + + def do_validataion(): + total_loss = 0.0 + count = 0 + for batch_id, data in enumerate(test_batch_generator()): + adapted_batch_data = adapt_batch_data(data) + outputs = sess.run([loss], + feed_dict={ + item[1]: adapted_batch_data[item[0]] + for item in feeding_dict.items() + }) + total_loss += outputs[0] + count += 1 + return total_loss / count + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + + with tf.Session(config=config) as sess: + init_g = tf.global_variables_initializer() + init_l = tf.local_variables_initializer() + sess.run(init_l) + sess.run(init_g) + for pass_id in xrange(args.pass_num): + pass_start_time = time.time() + words_seen = 0 + for batch_id, data in enumerate(train_batch_generator()): + adapted_batch_data = adapt_batch_data(data) + words_seen += np.sum(adapted_batch_data['src_sequence_length']) + words_seen += np.sum(adapted_batch_data['trg_sequence_length']) + outputs = sess.run([updates, loss], + feed_dict={ + item[1]: adapted_batch_data[item[0]] + for item in feeding_dict.items() + }) + print("pass_id=%d, 
batch_id=%d, train_loss: %f" % + (pass_id, batch_id, outputs[1])) + pass_end_time = time.time() + test_loss = do_validataion() + time_consumed = pass_end_time - pass_start_time + words_per_sec = words_seen / time_consumed + print("pass_id=%d, test_loss: %f, words/s: %f, sec/pass: %f" % + (pass_id, test_loss, words_per_sec, time_consumed)) + + +def infer(): + feeding_dict, predicted_ids = seq_to_seq_net( + embedding_dim=args.embedding_dim, + encoder_size=args.encoder_size, + decoder_size=args.decoder_size, + source_dict_dim=args.dict_size, + target_dict_dim=args.dict_size, + is_generating=True, + beam_size=args.beam_size, + max_generation_length=args.max_generation_length) + + src_dict, trg_dict = paddle.dataset.wmt14.get_dict(args.dict_size) + test_batch_generator = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(args.dict_size), buf_size=1000), + batch_size=args.batch_size) + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + with tf.Session(config=config) as sess: + restore(sess, './checkpoint/tf_seq2seq-1500') + for batch_id, data in enumerate(test_batch_generator()): + src_seq = map(lambda x: x[0], data) + + source_language_seq = [ + src_dict[item] for seq in src_seq for item in seq + ] + + src_sequence_length = np.array( + [len(seq) for seq in src_seq]).astype('int32') + src_seq_maxlen = np.max(src_sequence_length) + src_seq = np.array([ + padding_data(seq, src_seq_maxlen, END_TOKEN_IDX) + for seq in src_seq + ]).astype('int32') + + outputs = sess.run([predicted_ids], + feed_dict={ + feeding_dict['src_word_idx']: src_seq, + feeding_dict['src_sequence_length']: + src_sequence_length + }) + + print("\nDecoder result comparison: ") + source_language_seq = ' '.join(source_language_seq).lstrip( + '').rstrip('').strip() + inference_seq = '' + print(" --> source: " + source_language_seq) + for item in outputs[0][0]: + if item[0] == END_TOKEN_IDX: break + inference_seq += ' ' + trg_dict.get(item[0], '') + print(" --> inference: " + inference_seq) + + +if __name__ == '__main__': + args = parser.parse_args() + print_arguments(args) + if args.infer_only: + infer() + else: + train() diff --git a/benchmark/tensorflow/mnist.py b/benchmark/tensorflow/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..7140eed6eaff49b5c65f9ccb2e38f113a4cdbdbf --- /dev/null +++ b/benchmark/tensorflow/mnist.py @@ -0,0 +1,180 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
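+"""CNN benchmark on MNIST in TensorFlow: two 5x5 conv + max-pool blocks
+followed by a single fully connected softmax classifier (see run_benchmark).
+
+A typical invocation, using only flags defined in parse_args below, might be:
+
+    python mnist.py --device GPU --batch_size 128 --pass_num 5
+"""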
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import time +import numpy as np + +import tensorflow as tf +import paddle.v2 as paddle + +DTYPE = tf.float32 + + +def parse_args(): + parser = argparse.ArgumentParser("mnist model benchmark.") + parser.add_argument( + '--batch_size', type=int, default=128, help='The minibatch size.') + parser.add_argument( + '--iterations', type=int, default=35, help='The number of minibatches.') + parser.add_argument( + '--pass_num', type=int, default=5, help='The number of passes.') + parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help='The device type.') + args = parser.parse_args() + return args + + +def run_benchmark(args): + def weight_variable(dtype, shape): + initial = tf.truncated_normal(shape, stddev=0.1, dtype=dtype) + return tf.Variable(initial) + + def bias_variable(dtype, shape): + initial = tf.constant(0.1, shape=shape, dtype=dtype) + return tf.Variable(initial) + + device = '/cpu:0' if args.device == 'CPU' else '/device:GPU:0' + with tf.device(device): + images = tf.placeholder(DTYPE, shape=(None, 28, 28, 1)) + labels = tf.placeholder(tf.int64, shape=(None, )) + + # conv1, relu, pool1 + conv1_weights = weight_variable(DTYPE, [5, 5, 1, 20]) + conv1_bias = bias_variable(DTYPE, [20]) + conv1 = tf.nn.conv2d( + images, conv1_weights, strides=[1, 1, 1, 1], padding="VALID") + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias)) + pool1 = tf.nn.max_pool( + relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + # conv2, relu, pool2 + conv2_weights = weight_variable(DTYPE, [5, 5, 20, 50]) + conv2_bias = bias_variable(DTYPE, [50]) + conv2 = tf.nn.conv2d( + pool1, conv2_weights, strides=[1, 1, 1, 1], padding="VALID") + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias)) + pool2 = tf.nn.max_pool( + relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + # FC + pool_shape = pool2.get_shape().as_list() + hidden_dim = reduce(lambda a, b: a * b, pool_shape[1:], 1) + reshape = tf.reshape(pool2, shape=(tf.shape(pool2)[0], hidden_dim)) + fc_weights = weight_variable(DTYPE, [hidden_dim, 10]) + fc_bias = bias_variable(DTYPE, [10]) + logits = tf.matmul(reshape, fc_weights) + fc_bias + + # Get prediction + prediction = tf.nn.softmax(logits) + + # Loss + one_hot_labels = tf.one_hot(labels, depth=10) + cost = -tf.reduce_sum(tf.log(prediction) * one_hot_labels, [1]) + avg_cost = tf.reduce_mean(cost) + + # Get accuracy + correct = tf.equal(tf.argmax(prediction, 1), labels) + accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) + + # metrics, g_accuracy + with tf.variable_scope("reset_metrics_accuracy_scope") as scope: + g_accuracy = tf.metrics.accuracy( + labels, tf.argmax( + prediction, axis=1)) + vars = tf.contrib.framework.get_variables( + scope, collection=tf.GraphKeys.LOCAL_VARIABLES) + g_accuracy_reset_op = tf.variables_initializer(vars) + + # Optimizer + opt = tf.train.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999) + train_op = opt.minimize(avg_cost) + # train_op = tf.train.AdamOptimizer(1e-4).minimize(avg_cost) + + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=args.batch_size) + + def eval_test(): + sess.run(g_accuracy_reset_op) + for batch_id, data in enumerate(test_reader()): + images_data = np.array( + map(lambda x: np.transpose(x[0].reshape([1, 28, 28]), axes=[1,2,0]), 
data)).astype("float32") + labels_data = np.array(map(lambda x: x[1], data)).astype("int64") + + loss, acc, g_acc = sess.run( + [avg_cost, accuracy, g_accuracy], + feed_dict={images: images_data, + labels: labels_data}) + return g_acc[1] + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + + with tf.Session(config=config) as sess: + init_g = tf.global_variables_initializer() + init_l = tf.local_variables_initializer() + sess.run(init_g) + sess.run(init_l) + for pass_id in range(args.pass_num): + sess.run(g_accuracy_reset_op) + + pass_start = time.time() + for batch_id, data in enumerate(train_reader()): + images_data = np.array( + map(lambda x: np.transpose(x[0].reshape([1, 28, 28]), axes=[1,2,0]), data)).astype("float32") + labels_data = np.array(map(lambda x: x[1], data)).astype( + "int64") + + start = time.time() + _, loss, acc, g_acc = sess.run( + [train_op, avg_cost, accuracy, g_accuracy], + feed_dict={images: images_data, + labels: labels_data}) + end = time.time() + + print("pass=%d, batch=%d, loss=%f, error=%f, elapse=%f" % + (pass_id, batch_id, loss, 1 - acc, (end - start) / 1000)) + + pass_end = time.time() + test_avg_acc = eval_test() + + print( + "pass=%d, training_avg_accuracy=%f, test_avg_acc=%f, elapse=%f" + % (pass_id, g_acc[1], test_avg_acc, + (pass_end - pass_start) / 1000)) + + +def print_arguments(args): + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + args = parse_args() + print_arguments(args) + run_benchmark(args) diff --git a/benchmark/tensorflow/resnet.py b/benchmark/tensorflow/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c432fa8d59571e128b9ff9e3ffa1949b792ef3a4 --- /dev/null +++ b/benchmark/tensorflow/resnet.py @@ -0,0 +1,504 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +based on https://github.com/tensorflow/models/blob/master/official/resnet/resnet_model.py + +Get help: python resnet.py --help +See performance on flowers: python resnet.py +Train on cifar10: python resnet.py --data=cifar10 --with_test +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import time +import numpy as np + +import paddle.v2 as paddle +import tensorflow as tf + +DTYPE = tf.float32 + + +def parse_args(): + parser = argparse.ArgumentParser('Convolution model benchmark.') + parser.add_argument( + '--model', + type=str, + choices=['resnet'], + default='resnet', + help='The model architecture.') + parser.add_argument( + '--batch_size', type=int, default=32, help='The minibatch size.') + parser.add_argument( + '--use_fake_data', + action='store_true', + help='use real data or fake data') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) + parser.add_argument( + '--iterations', + type=int, + default=105, + help='The number of minibatches.') + parser.add_argument( + '--pass_num', type=int, default=300, help='The number of passes.') + parser.add_argument( + '--order', + type=str, + default='NHWC', + choices=['NCHW', 'NHWC'], + help='The data order, now only support NCHW.') + parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help='The device type.') + parser.add_argument( + '--data', + type=str, + default='flowers102', + choices=['flowers102', 'cifar10'], + help='The kinds of data.') + parser.add_argument( + '--infer_only', action='store_true', help='If set, run forward only.') + parser.add_argument( + '--use_cprof', action='store_true', help='If set, use cProfile.') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') + parser.add_argument( + '--use_nvprof', + action='store_true', + help='If set, use nvprof for CUDA.') + args = parser.parse_args() + return args + + +def print_arguments(args): + vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and + vars(args)['device'] == 'GPU') + vars(args)['iterations'] = vars(args)['pass_num'] * 1000 if vars(args)[ + 'with_test'] else vars(args)['iterations'] + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +def fixed_padding(inputs, kernel_size, data_format): + """Pads the input along the spatial dimensions independently of input size. + Args: + inputs: A tensor of size [batch, channels, height_in, width_in] or + [batch, height_in, width_in, channels] depending on data_format. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + Should be a positive integer. + data_format: The input format ('channels_last' or 'channels_first'). + Returns: + A tensor with the same format as the input with the data either intact + (if kernel_size == 1) or padded (if kernel_size > 1). 
+ """ + pad_total = kernel_size - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + + if data_format == 'channels_first': + padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], + [pad_beg, pad_end]]) + else: + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], + [pad_beg, pad_end], [0, 0]]) + return padded_inputs + + +def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): + """Strided 2-D convolution with explicit padding.""" + # The padding is consistent and is based only on `kernel_size`, not on the + # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). + # This is consistent with PaddlePaddle. + # In addition, the calculation for output size in TensorFlow can refer: + # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/common_shape_fns.cc + if strides > 1: + inputs = fixed_padding(inputs, kernel_size, data_format) + + return tf.layers.conv2d( + inputs=inputs, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=('SAME' if strides == 1 else 'VALID'), + use_bias=False, + kernel_initializer=tf.variance_scaling_initializer(), + data_format=data_format) + + +def conv_bn(inputs, + filters, + kernel_size, + strides, + is_training, + data_format, + act=True): + # def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): + # set fused=True for a significant performance boost. See + # https://www.tensorflow.org/performance/performance_guide#common_fused_ops + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=kernel_size, + strides=strides, + data_format=data_format) + inputs = tf.layers.batch_normalization( + inputs=inputs, + axis=1 if data_format == 'channels_first' else 3, + momentum=0.9, + epsilon=1e-05, + center=True, + scale=True, + training=is_training, + fused=True) + if act: + inputs = tf.nn.relu(inputs) + return inputs + + +def basicblock(inputs, filters, is_training, projection_shortcut, strides, + data_format): + shortcut = inputs + if projection_shortcut is not None: + shortcut = projection_shortcut(inputs) + inputs = conv_bn(inputs, filters, 3, strides, is_training, data_format) + inputs = conv_bn(inputs, filters, 3, 1, is_training, data_format, act=False) + inputs = inputs + shortcut + inputs = tf.nn.relu(inputs) + return inputs + + +def bottleneck(inputs, filters, is_training, projection_shortcut, strides, + data_format): + shortcut = inputs + if projection_shortcut is not None: + shortcut = projection_shortcut(inputs) + inputs = conv_bn(inputs, filters, 1, strides, is_training, data_format) + inputs = conv_bn(inputs, filters, 3, 1, is_training, data_format, act=False) + inputs = conv_bn( + inputs, filters * 4, 1, 1, is_training, data_format, act=False) + inputs = inputs + shortcut + inputs = tf.nn.relu(inputs) + return inputs + + +def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, + data_format): + # Bottleneck blocks end with 4x the number of filters as they start with + filters_out = 4 * filters if block_fn is bottleneck else filters + + def projection_shortcut(inputs): + return conv2d_fixed_padding( + inputs=inputs, + filters=filters_out, + kernel_size=1, + strides=strides, + data_format=data_format) + + # Only the first block per block_layer uses projection_shortcut and strides + inputs = block_fn(inputs, filters, is_training, projection_shortcut, + strides, data_format) + + for _ in range(1, blocks): + inputs = block_fn(inputs, filters, is_training, None, 1, 
data_format) + + return tf.identity(inputs, name) + + +def resnet_imagenet(depth, class_dim, data_format): + """Returns the ResNet model for a given size and number of output classes.""" + + def resnet_generator(block_fn, + layers, + num_classes, + data_format='channels_last'): + if data_format is None: + data_format = ('channels_first' + if tf.test.is_built_with_cuda() else 'channels_last') + + def model(inputs, is_training): + """Constructs the ResNet model given the inputs.""" + if data_format == 'channels_first': + # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). + # This provides a large performance boost on GPU. See + # https://www.tensorflow.org/performance/performance_guide#data_formats + inputs = tf.transpose(inputs, [0, 3, 1, 2]) + + inputs = conv_bn(inputs, 64, 7, 2, is_training, data_format) + inputs = tf.identity(inputs, 'initial_conv') + inputs = tf.layers.max_pooling2d( + inputs=inputs, + pool_size=3, + strides=2, + padding='SAME', + data_format=data_format) + inputs = tf.identity(inputs, 'initial_max_pool') + inputs = block_layer(inputs, 64, block_fn, layers[0], 1, + is_training, 'block_layer1', data_format) + inputs = block_layer(inputs, 128, block_fn, layers[1], 2, + is_training, 'block_layer2', data_format) + inputs = block_layer(inputs, 256, block_fn, layers[2], 2, + is_training, 'block_layer3', data_format) + inputs = block_layer(inputs, 512, block_fn, layers[3], 2, + is_training, 'block_layer4', data_format) + inputs = tf.layers.average_pooling2d( + inputs=inputs, + pool_size=7, + strides=1, + padding='VALID', + data_format=data_format) + inputs = tf.identity(inputs, 'final_avg_pool') + inputs = tf.reshape(inputs, + [-1, 512 if block_fn is basicblock else 2048]) + inputs = tf.layers.dense(inputs=inputs, units=num_classes) + inputs = tf.identity(inputs, 'final_dense') + return inputs + + return model + + model_params = { + 18: { + 'block': basicblock, + 'layers': [2, 2, 2, 2] + }, + 34: { + 'block': basicblock, + 'layers': [3, 4, 6, 3] + }, + 50: { + 'block': bottleneck, + 'layers': [3, 4, 6, 3] + }, + 101: { + 'block': bottleneck, + 'layers': [3, 4, 23, 3] + }, + 152: { + 'block': bottleneck, + 'layers': [3, 8, 36, 3] + }, + 200: { + 'block': bottleneck, + 'layers': [3, 24, 36, 3] + } + } + if depth not in model_params: + raise ValueError('Not a valid depth:', depth) + params = model_params[depth] + return resnet_generator(params['block'], params['layers'], class_dim, + data_format) + + +def resnet_cifar10(depth, num_classes, data_format): + if depth % 6 != 2: + raise ValueError('depth must be 6n + 2:', depth) + + num_blocks = (depth - 2) // 6 + + if data_format is None: + data_format = ('channels_first' + if tf.test.is_built_with_cuda() else 'channels_last') + + def model(inputs, is_training): + inputs = conv_bn(inputs, 16, 3, 1, is_training, data_format) + inputs = tf.identity(inputs, 'initial_conv') + inputs = block_layer(inputs, 16, basicblock, num_blocks, 1, is_training, + 'block_layer1', data_format) + inputs = block_layer(inputs, 32, basicblock, num_blocks, 2, is_training, + 'block_layer2', data_format) + inputs = block_layer(inputs, 64, basicblock, num_blocks, 2, is_training, + 'block_layer3', data_format) + inputs = tf.layers.average_pooling2d( + inputs=inputs, + pool_size=8, + strides=1, + padding='VALID', + data_format=data_format) + inputs = tf.identity(inputs, 'final_avg_pool') + inputs = tf.reshape(inputs, [-1, 64]) + inputs = tf.layers.dense(inputs=inputs, units=num_classes) + inputs = tf.identity(inputs, 'final_dense') + return 
inputs + + return model + + +def run_benchmark(args, data_format='channels_last', device='/cpu:0'): + """Our model_fn for ResNet to be used with our Estimator.""" + + class_dim = 1000 + dshape = (None, 224, 224, 3) + + pdshape = (3, 224, 224) + if args.data == 'flowers102': + class_dim = 102 + dshape = (None, 224, 224, 3) + pdshape = (3, 224, 224) + elif args.data == 'cifar10': + class_dim = 10 + dshape = (None, 32, 32, 3) + pdshape = (3, 32, 32) + + with tf.device(device): + images = tf.placeholder(DTYPE, shape=dshape) + labels = tf.placeholder(tf.int64, shape=(None, )) + is_training = tf.placeholder('bool') + onehot_labels = tf.one_hot(labels, depth=class_dim) + + network = resnet_cifar10( + 32, class_dim, + data_format) if args.data == 'cifar10' else resnet_imagenet( + 50, class_dim, data_format) + + logits = network(inputs=images, is_training=is_training) + + cross_entropy = tf.losses.softmax_cross_entropy( + logits=logits, onehot_labels=onehot_labels) + avg_cost = tf.reduce_mean(cross_entropy) + + correct = tf.equal(tf.argmax(logits, 1), labels) + accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) + + lr = 0.1 if args.data == 'cifar10' else 0.01 + optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9) + + # Batch norm requires update_ops to be added as a train_op dependency. + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies(update_ops): + train_op = optimizer.minimize(avg_cost) + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.dataset.cifar.test10() + if args.data == 'cifar10' else paddle.dataset.flowers.test(), + batch_size=100) + + def test(): + test_accs = [] + for batch_id, data in enumerate(test_reader()): + test_images = np.array( + map(lambda x: np.transpose(x[0].reshape(pdshape), + axes=[1, 2, 0]), data)).astype("float32") + test_labels = np.array(map(lambda x: x[1], data)).astype('int64') + test_accs.append( + accuracy.eval(feed_dict={ + images: test_images, + labels: test_labels, + is_training: False + })) + print("Pass = %d, Train performance = %f imgs/s, Test accuracy = %f\n" % + (pass_id, num_samples / train_elapsed, np.mean(test_accs))) + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + + with tf.Session(config=config) as sess: + init_g = tf.global_variables_initializer() + init_l = tf.local_variables_initializer() + sess.run(init_g) + sess.run(init_l) + + if args.use_fake_data: + data = train_reader().next() + images_data = np.array( + map(lambda x: np.transpose(x[0].reshape(pdshape), + axes=[1, 2, 0]), data)).astype("float32") + labels_data = np.array(map(lambda x: x[1], data)).astype('int64') + iters, num_samples, start_time = 0, 0, 0.0 + for pass_id in range(args.pass_num): + if iters == args.iterations: + break + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + if not args.use_fake_data: + images_data = np.array( + map(lambda x: np.transpose(x[0].reshape(pdshape), + axes=[1, 2, 0]), data)).astype("float32") + labels_data = np.array(map(lambda x: x[1], data)).astype( + 'int64') + _, loss, acc = sess.run([train_op, avg_cost, accuracy], + feed_dict={ + images: images_data, + 
labels: labels_data, + is_training: True + }) + iters += 1 + train_accs.append(acc) + train_losses.append(loss) + num_samples += len(data) + print("Pass=%d, Iter=%d, Loss=%f, Accuray=%f\n" % + (pass_id, iters, loss, acc)) + + train_elapsed = time.time() - start_time + print("Pass=%d, Loss=%f, Accuray=%f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + + # evaluation + if args.with_test: + test() + + if not args.with_test: + duration = time.time() - start_time + examples_per_sec = num_samples / duration + sec_per_batch = duration / (iters - args.skip_batch_num) + + print('Total examples: %d, total time: %.5f' % + (num_samples, duration)) + print('%.5f examples/sec, %.5f sec/batch' % + (examples_per_sec, sec_per_batch)) + + +if __name__ == '__main__': + args = parse_args() + print_arguments(args) + if tf.test.is_built_with_cuda(): + device = '/device:GPU:0' + if args.order == 'NHWC': + data_format = 'channels_last' + else: + data_format = 'channels_first' + else: + device = '/cpu:0' + if args.order == 'NHWC': + data_format = 'channels_last' + else: + raise ValueError('Only support NHWC order in CPU mode') + + run_benchmark(args, data_format, device) diff --git a/benchmark/tensorflow/stacked_dynamic_lstm.py b/benchmark/tensorflow/stacked_dynamic_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..5285033005044d907d0b7e91eb66ee7281c4f27a --- /dev/null +++ b/benchmark/tensorflow/stacked_dynamic_lstm.py @@ -0,0 +1,220 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import argparse +import time +import tensorflow as tf + +import paddle.v2 as paddle + + +def parse_args(): + parser = argparse.ArgumentParser("LSTM model benchmark.") + parser.add_argument( + '--batch_size', + type=int, + default=32, + help='The sequence number of a batch data. (default: %(default)d)') + parser.add_argument( + '--stacked_num', + type=int, + default=5, + help='Number of lstm layers to stack. (default: %(default)d)') + parser.add_argument( + '--embedding_dim', + type=int, + default=512, + help='Dimension of embedding table. (default: %(default)d)') + parser.add_argument( + '--hidden_dim', + type=int, + default=512, + help='Hidden size of lstm unit. (default: %(default)d)') + parser.add_argument( + '--pass_num', + type=int, + default=10, + help='Epoch number to train. (default: %(default)d)') + parser.add_argument( + '--learning_rate', + type=float, + default=0.0002, + help='Learning rate used to train. 
(default: %(default)f)')
+ parser.add_argument(
+ '--infer_only', action='store_true', help='If set, run forward only.')
+ args = parser.parse_args()
+ return args
+
+
+def print_arguments(args):
+ print('----------- Configuration Arguments -----------')
+ for arg, value in sorted(vars(args).iteritems()):
+ print('%s: %s' % (arg, value))
+ print('------------------------------------------------')
+
+
+def dynamic_lstm_model(dict_size,
+ embedding_dim,
+ hidden_dim,
+ stacked_num,
+ class_num=2,
+ is_train=True):
+ word_idx = tf.placeholder(tf.int64, shape=[None, None])
+ sequence_length = tf.placeholder(tf.int64, shape=[None, ])
+
+ embedding_weights = tf.get_variable('word_embeddings',
+ [dict_size, embedding_dim])
+ embedding = tf.nn.embedding_lookup(embedding_weights, word_idx)
+
+ lstm_cell = tf.nn.rnn_cell.LSTMCell(
+ num_units=hidden_dim, use_peepholes=False)
+ stacked_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * stacked_num)
+
+ # final_state [LSTMTuple(c, h), LSTMTuple(c, h) ...] total stacked_num LSTMTuples
+ _, final_state = tf.nn.dynamic_rnn(
+ cell=stacked_cell,
+ inputs=embedding,
+ dtype=tf.float32,
+ sequence_length=sequence_length)
+
+ w = tf.Variable(
+ tf.truncated_normal([hidden_dim, class_num]), dtype=tf.float32)
+ bias = tf.Variable(
+ tf.constant(
+ value=0.0, shape=[class_num], dtype=tf.float32))
+ prediction = tf.matmul(final_state[-1][1], w) + bias
+
+ if not is_train:
+ return (word_idx, sequence_length), tf.nn.softmax(prediction)
+
+ label = tf.placeholder(tf.int64, shape=[None, ])
+ loss = tf.nn.softmax_cross_entropy_with_logits(
+ labels=tf.one_hot(label, 2), logits=prediction)
+ avg_loss = tf.reduce_mean(loss)
+
+ correct_count = tf.equal(tf.argmax(prediction, 1), label)
+ acc = tf.reduce_mean(tf.cast(correct_count, tf.float32))
+
+ with tf.variable_scope("reset_metrics_accuracy_scope") as scope:
+ g_acc = tf.metrics.accuracy(label, tf.argmax(prediction, axis=1))
+ vars = tf.contrib.framework.get_variables(
+ scope, collection=tf.GraphKeys.LOCAL_VARIABLES)
+ reset_op = tf.variables_initializer(vars)
+
+ return (word_idx, sequence_length, label), avg_loss, acc, g_acc, reset_op
+
+
+def padding_data(data, padding_size, value):
+ data = data + [value] * padding_size
+ return data[:padding_size]
+
+
+def train(args):
+ word_dict = paddle.dataset.imdb.word_dict()
+ dict_size = len(word_dict)
+
+ feeding_list, avg_loss, acc, g_acc, reset_op = dynamic_lstm_model(
+ dict_size, args.embedding_dim, args.hidden_dim, args.stacked_num)
+
+ adam_optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
+ train_op = adam_optimizer.minimize(avg_loss)
+
+ train_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.imdb.train(word_dict), buf_size=25000),
+ batch_size=args.batch_size)
+
+ test_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.imdb.test(word_dict), buf_size=25000),
+ batch_size=args.batch_size)
+
+ def do_validation(sess):
+ sess.run(reset_op)
+ for batch_id, data in enumerate(test_reader()):
+ word_idx = map(lambda x: x[0], data)
+ sequence_length = np.array(
+ [len(seq) for seq in word_idx]).astype('int64')
+ maxlen = np.max(sequence_length)
+ word_idx = [padding_data(seq, maxlen, 0) for seq in word_idx]
+ word_idx = np.array(word_idx).astype('int64')
+ label = np.array(map(lambda x: x[1], data)).astype('int64')
+
+ # Do not fetch train_op here: validation must not update the model weights.
+ loss, fetch_acc, fetch_g_acc = sess.run(
+ [avg_loss, acc, g_acc],
+ feed_dict={
+ feeding_list[0]: word_idx,
+ feeding_list[1]: sequence_length,
+ feeding_list[2]: label
+ })
+
+ return
fetch_g_acc[1] + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + with tf.Session(config=config) as sess: + init_g = tf.global_variables_initializer() + init_l = tf.local_variables_initializer() + sess.run(init_l) + sess.run(init_g) + + for pass_id in xrange(args.pass_num): + # clear accuracy local variable + sess.run(reset_op) + pass_start_time = time.time() + words_seen = 0 + + for batch_id, data in enumerate(train_reader()): + word_idx = map(lambda x: x[0], data) + sequence_length = np.array( + [len(seq) for seq in word_idx]).astype('int64') + words_seen += np.sum(sequence_length) + maxlen = np.max(sequence_length) + word_idx = [padding_data(seq, maxlen, 0) for seq in word_idx] + word_idx = np.array(word_idx).astype('int64') + label = np.array(map(lambda x: x[1], data)).astype('int64') + + _, loss, fetch_acc, fetch_g_acc = sess.run( + [train_op, avg_loss, acc, g_acc], + feed_dict={ + feeding_list[0]: word_idx, + feeding_list[1]: sequence_length, + feeding_list[2]: label + }) + + print("pass_id=%d, batch_id=%d, loss: %f, acc: %f, avg_acc: %f" + % (pass_id, batch_id, loss, fetch_acc, fetch_g_acc[1])) + + pass_end_time = time.time() + time_consumed = pass_end_time - pass_start_time + words_per_sec = words_seen / time_consumed + test_acc = do_validation(sess) + print("pass_id=%d, test_acc: %f, words/s: %f, sec/pass: %f" % + (pass_id, test_acc, words_per_sec, time_consumed)) + + +if __name__ == '__main__': + args = parse_args() + print_arguments(args) + + if args.infer_only: + pass + else: + train(args) diff --git a/benchmark/tensorflow/vgg.py b/benchmark/tensorflow/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..fba5ec71a46b3ac8b2e1244424c39fd5192e5458 --- /dev/null +++ b/benchmark/tensorflow/vgg.py @@ -0,0 +1,324 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""VGG16 benchmark in TensorFlow""" +import tensorflow as tf +import paddle.v2 as paddle +import numpy as np +import argparse +import time + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--batch_size', type=int, default=128, help="Batch size for training.") +parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test') +parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') +parser.add_argument( + '--learning_rate', + type=float, + default=1e-3, + help="Learning rate for training.") +parser.add_argument('--num_passes', type=int, default=50, help="No. of passes.") +parser.add_argument( + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help="The device type.") +parser.add_argument( + '--data_format', + type=str, + default='NHWC', + choices=['NCHW', 'NHWC'], + help='The data order, NCHW=[batch, channels, height, width].' 
+ 'Only support NHWC right now.') +parser.add_argument( + '--data_set', + type=str, + default='cifar10', + choices=['cifar10', 'flowers'], + help='Optional dataset for benchmark.') +args = parser.parse_args() + + +class VGG16Model(object): + def __init__(self): + self.parameters = [] + + def batch_norm_relu(self, inputs, is_training): + """Performs a batch normalization followed by a ReLU.""" + # We set fused=True for a significant speed boost. See + # https://www.tensorflow.org/speed/speed_guide#common_fused_ops + inputs = tf.layers.batch_normalization( + inputs=inputs, + axis=1 if args.data_format == 'NCHW' else -1, + momentum=0.9, + epsilon=1e-05, + center=True, + scale=True, + training=is_training, + fused=True) + inputs = tf.nn.relu(inputs) + return inputs + + def conv_bn_layer(self, + name, + images, + kernel_shape, + is_training, + drop_rate=0.0): + with tf.name_scope(name) as scope: + kernel = tf.Variable( + tf.truncated_normal( + kernel_shape, dtype=tf.float32, stddev=1e-1), + name='weights') + conv = tf.nn.conv2d( + images, + kernel, [1, 1, 1, 1], + data_format=args.data_format, + padding='SAME') + biases = tf.Variable( + tf.constant( + 0.0, shape=[kernel_shape[-1]], dtype=tf.float32), + trainable=True, + name='biases') + out = tf.nn.bias_add(conv, biases) + out = self.batch_norm_relu(out, is_training) + out = tf.layers.dropout(out, rate=drop_rate, training=is_training) + return out + + def fc_layer(self, name, inputs, shape): + with tf.name_scope(name) as scope: + fc_w = tf.Variable( + tf.truncated_normal( + shape, dtype=tf.float32, stddev=1e-1), + name='weights') + fc_b = tf.Variable( + tf.constant( + 0.0, shape=[shape[-1]], dtype=tf.float32), + trainable=True, + name='biases') + out = tf.nn.bias_add(tf.matmul(inputs, fc_w), fc_b) + return out + + def network(self, images, class_dim, is_training): + """ VGG16 model structure. 
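+
+ In summary, the layers built below are thirteen 3x3 convolutions grouped
+ as 64-64, 128-128, 256-256-256, 512-512-512 and 512-512-512 channels
+ (each followed by batch normalization and ReLU), a 2x2 max pool after
+ every group, and then two 512-wide fully connected layers plus a final
+ projection to class_dim.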
+ + TODO(kuke): enable this network to support the 'NCHW' data format + """ + + # conv1 + conv1_1 = self.conv_bn_layer( + 'conv1_1', images, [3, 3, 3, 64], is_training, drop_rate=0.3) + conv1_2 = self.conv_bn_layer( + 'conv1_2', conv1_1, [3, 3, 64, 64], is_training, drop_rate=0.0) + # pool1 + pool1 = tf.nn.max_pool( + conv1_2, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool1') + # conv2 + conv2_1 = self.conv_bn_layer( + 'conv2_1', pool1, [3, 3, 64, 128], is_training, drop_rate=0.4) + conv2_2 = self.conv_bn_layer( + 'conv2_2', conv2_1, [3, 3, 128, 128], is_training, drop_rate=0.0) + # pool2 + pool2 = tf.nn.max_pool( + conv2_2, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool2') + # conv3 + conv3_1 = self.conv_bn_layer( + 'conv3_1', pool2, [3, 3, 128, 256], is_training, drop_rate=0.4) + conv3_2 = self.conv_bn_layer( + 'conv3_2', conv3_1, [3, 3, 256, 256], is_training, drop_rate=0.4) + conv3_3 = self.conv_bn_layer( + 'conv3_3', conv3_2, [3, 3, 256, 256], is_training, drop_rate=0.0) + # pool3 + pool3 = tf.nn.max_pool( + conv3_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool3') + # conv4 + conv4_1 = self.conv_bn_layer( + 'conv4_1', pool3, [3, 3, 256, 512], is_training, drop_rate=0.4) + conv4_2 = self.conv_bn_layer( + 'conv4_2', conv4_1, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv4_3 = self.conv_bn_layer( + 'conv4_3', conv4_2, [3, 3, 512, 512], is_training, drop_rate=0.0) + # pool4 + pool4 = tf.nn.max_pool( + conv4_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool4') + # conv5 + conv5_1 = self.conv_bn_layer( + 'conv5_1', pool4, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv5_2 = self.conv_bn_layer( + 'conv5_2', conv5_1, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv5_3 = self.conv_bn_layer( + 'conv5_3', conv5_2, [3, 3, 512, 512], is_training, drop_rate=0.0) + # pool5 + pool5 = tf.nn.max_pool( + conv5_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool4') + # flatten + shape = int(np.prod(pool5.get_shape()[1:])) + pool5_flat = tf.reshape(pool5, [-1, shape]) + # fc1 + drop = tf.layers.dropout(pool5_flat, rate=0.5, training=is_training) + fc1 = self.fc_layer('fc1', drop, [shape, 512]) + # fc2 + bn = self.batch_norm_relu(fc1, is_training) + drop = tf.layers.dropout(bn, rate=0.5, training=is_training) + fc2 = self.fc_layer('fc2', drop, [512, 512]) + + fc3 = self.fc_layer('fc3', fc2, [512, class_dim]) + + return fc3 + + +def run_benchmark(): + """Run benchmark on cifar10 or flowers.""" + + if args.data_set == "cifar10": + class_dim = 10 + raw_shape = (3, 32, 32) + dat_shape = (None, 32, 32, 3) if args.data_format == 'NHWC' else ( + None, 3, 32, 32) + else: + class_dim = 102 + raw_shape = (3, 224, 224) + dat_shape = (None, 224, 224, 3) if args.data_format == 'NHWC' else ( + None, 3, 224, 224) + + device = '/cpu:0' if args.device == 'CPU' else '/device:GPU:0' + + with tf.device(device): + images = tf.placeholder(tf.float32, shape=dat_shape) + labels = tf.placeholder(tf.int64, shape=(None, )) + is_training = tf.placeholder('bool') + onehot_labels = tf.one_hot(labels, depth=class_dim) + + vgg16 = VGG16Model() + logits = vgg16.network(images, class_dim, is_training) + loss = tf.losses.softmax_cross_entropy( + onehot_labels=onehot_labels, logits=logits) + avg_loss = tf.reduce_mean(loss) + + correct = tf.equal(tf.argmax(logits, 1), labels) + accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) + + optimizer = 
tf.train.AdamOptimizer(learning_rate=args.learning_rate) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies(update_ops): + train_op = optimizer.minimize(avg_loss) + + # data reader + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.test10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), + buf_size=5120), + batch_size=args.batch_size) + + # test + def test(): + test_accs = [] + for batch_id, data in enumerate(test_reader()): + test_images = np.array( + map(lambda x: np.transpose(x[0].reshape(raw_shape), + axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") + test_labels = np.array(map(lambda x: x[1], data)).astype('int64') + test_accs.append( + accuracy.eval(feed_dict={ + images: test_images, + labels: test_labels, + is_training: False + })) + return np.mean(test_accs) + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + + with tf.Session(config=config) as sess: + init_g = tf.global_variables_initializer() + init_l = tf.local_variables_initializer() + sess.run(init_g) + sess.run(init_l) + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in range(args.num_passes): + # train + num_samples = 0 + start_time = time.time() + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + train_images = np.array( + map(lambda x: np.transpose(x[0].reshape(raw_shape), + axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") + train_labels = np.array(map(lambda x: x[1], data)).astype( + 'int64') + _, loss, acc = sess.run([train_op, avg_loss, accuracy], + feed_dict={ + images: train_images, + labels: train_labels, + is_training: True + }) + iters += 1 + num_samples += len(data) + print("Pass = %d, Iters = %d, Loss = %f, Accuracy = %f" % + (pass_id, iters, loss, acc)) + train_elapsed = time.time() - start_time + # test + pass_test_acc = test() + print("Pass = %d, Train speed = %f imgs/s, Test accuracy = %f\n" % + (pass_id, num_samples / train_elapsed, pass_test_acc)) + + +def print_arguments(): + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + print_arguments() + run_benchmark() diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 6320b17520a687f88993b6f464d9115838b0f96b..e3b9d94215a858c5c9a34e1b7e97540f1876801d 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -62,29 +62,33 @@ endif() ## Then find the reference-cblas. 
www.netlib.org/blas/ - - set(REFERENCE_CBLAS_ROOT $ENV{REFERENCE_CBLAS_ROOT} CACHE PATH "Folder contains reference-cblas") -set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS - ${REFERENCE_CBLAS_ROOT}/include - /usr/include - /usr/include/cblas -) - -set(REFERENCE_CBLAS_LIB_SEARCH_PATHS - ${REFERENCE_CBLAS_ROOT}/lib - /usr/lib - /usr/lib/blas/reference/ - /usr/lib/reference/ -) +if(NOT CMAKE_CROSSCOMPILING) + set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS + ${REFERENCE_CBLAS_ROOT}/include + /usr/include + /usr/include/cblas + ) + + set(REFERENCE_CBLAS_LIB_SEARCH_PATHS + ${REFERENCE_CBLAS_ROOT}/lib + /usr/lib + /usr/lib/blas/reference/ + /usr/lib/reference/ + ) +else() + # Disable the finding of reference cblas under host's system path + set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/include) + set(REFERENCE_CBLAS_LIB_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/lib) +endif() find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS}) find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS ${REFERENCE_CBLAS_LIB_SEARCH_PATHS}) -if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) +if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) set(CBLAS_FOUND ON) set(CBLAS_PROVIDER REFERENCE) set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index 0853b981813c5d60a12603471df7e0b216b0822f..e90948782bb5e333bbdb47ef9d61c1e37e3cf9e4 100644 --- a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -24,16 +24,16 @@ SET(GRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/grpc) SET(GRPC_INCLUDE_DIR "${GRPC_INSTALL_DIR}/include/" CACHE PATH "grpc include directory." FORCE) SET(GRPC_CPP_PLUGIN "${GRPC_INSTALL_DIR}/bin/grpc_cpp_plugin" CACHE FILEPATH "GRPC_CPP_PLUGIN" FORCE) IF(APPLE) - SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin | sed "s/-Werror//g" | sh) + SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin | sed "s/-Werror//g" | sh) ELSE() - SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin) + SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin) ENDIF() ExternalProject_Add( extern_grpc DEPENDS protobuf zlib GIT_REPOSITORY "https://github.com/grpc/grpc.git" - GIT_TAG "v1.8.x" + GIT_TAG "v1.10.x" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index a25cff5fc567f22d4573625487f31bd4192bb172..5759e5c489724332793bf103b7aacf7ffb068611 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -36,7 +36,8 @@ MESSAGE(STATUS "Set ${MKLDNN_INSTALL_DIR}/lib to runtime path") SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLDNN_INSTALL_DIR}/lib") -INCLUDE_DIRECTORIES(${MKLDNN_INC_DIR}) +INCLUDE_DIRECTORIES(${MKLDNN_INC_DIR}) # For MKLDNN code to include internal headers. 
+INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include mkldnn.h IF(${CBLAS_PROVIDER} STREQUAL "MKLML") SET(MKLDNN_DEPENDS ${MKLML_PROJECT}) diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index df3f0c7f0c31efaa127515bb98e5668b8f9df199..796bcf28a1dfb308ccb7a2f839742c5c2fcf2002 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -28,7 +28,7 @@ INCLUDE(ExternalProject) SET(MKLML_PROJECT "extern_mklml") SET(MKLML_VER "mklml_lnx_2018.0.1.20171007") -SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.11/${MKLML_VER}.tgz") +SET(MKLML_URL "http://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.tgz") SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml") SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}") SET(MKLML_DST_DIR "mklml") diff --git a/cmake/external/nccl.cmake b/cmake/external/nccl.cmake deleted file mode 100644 index af5c689c3524741a88518eeb3f85996872257677..0000000000000000000000000000000000000000 --- a/cmake/external/nccl.cmake +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if(NOT WITH_GPU) - return() -endif() - -include(ExternalProject) - -set(NCCL_SOURCE_DIR ${THIRD_PARTY_PATH}/nccl) - -include_directories(${NCCL_SOURCE_DIR}/src/extern_nccl/src) - -if(WITH_DSO) - # If we use DSO, we do not build nccl, just download the dependencies - set(NCCL_BUILD_COMMAND "") - set(NCCL_INSTALL_COMMAND "") - set(NCCL_INSTALL_DIR "") -else() - # otherwise, we build nccl and link it. 
- set(NCCL_INSTALL_DIR ${THIRD_PARTY_PATH}/install/nccl) - # Note: cuda 8.0 is needed to make nccl - # When cuda is not installed on the system directory, need to set CUDA_HOME to your cuda root - set(NCCL_BUILD_COMMAND "make -j 8") - set(NCCL_INSTALL_COMMAND "make install PREFIX=${NCCL_INSTALL_DIR}") -endif() - -ExternalProject_Add( - extern_nccl - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/NVIDIA/nccl.git" - GIT_TAG "v1.3.4-1" - PREFIX "${NCCL_SOURCE_DIR}" - UPDATE_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND "${NCCL_BUILD_COMMAND}" - INSTALL_COMMAND "${NCCL_INSTALL_COMMAND}" - INSTALL_DIR "${NCCL_INSTALL_DIR}" - TEST_COMMAND "" -) - -if(WITH_DSO) - if(${CMAKE_VERSION} VERSION_LESS "3.3.0") - set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/lib_nccl_dummy.c) - file(WRITE ${dummyfile} "const char * dummy_nccl = \"${dummyfile}\";") - add_library(nccl STATIC ${dummyfile}) - else() - add_library(nccl INTERFACE) - endif() -else() - add_library(nccl STATIC IMPORTED GLOBAL) - set_property(TARGET nccl PROPERTY IMPORTED_LOCATION - ${NCCL_INSTALL_DIR}/lib/libnccl_static.a) -endif() - -add_dependencies(nccl extern_nccl) diff --git a/cmake/external/snappy.cmake b/cmake/external/snappy.cmake index 71f54c425d4c38e271a8f1b78887d95a27252443..80282329c6ac65fbd1493a6838efca4bd9cadaad 100644 --- a/cmake/external/snappy.cmake +++ b/cmake/external/snappy.cmake @@ -11,19 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -IF(MOBILE_INFERENCE) +if(MOBILE_INFERENCE OR RPI) return() -ENDIF() +endif() include (ExternalProject) # NOTE: snappy is needed when linking with recordio -SET(SNAPPY_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy) -SET(SNAPPY_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy) -SET(SNAPPY_INCLUDE_DIR "${SNAPPY_INSTALL_DIR}/include/" CACHE PATH "snappy include directory." FORCE) +set(SNAPPY_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy) +set(SNAPPY_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy) +set(SNAPPY_INCLUDE_DIR "${SNAPPY_INSTALL_DIR}/include" CACHE PATH "snappy include directory." FORCE) + +set(SNAPPY_LIBRARIES "${SNAPPY_INSTALL_DIR}/lib/libsnappy.a") ExternalProject_Add( extern_snappy @@ -51,8 +52,7 @@ ExternalProject_Add( ) add_library(snappy STATIC IMPORTED GLOBAL) -set_property(TARGET snappy PROPERTY IMPORTED_LOCATION - "${SNAPPY_INSTALL_DIR}/lib/libsnappy.a") +set_property(TARGET snappy PROPERTY IMPORTED_LOCATION ${SNAPPY_LIBRARIES}) include_directories(${SNAPPY_INCLUDE_DIR}) add_dependencies(snappy extern_snappy) diff --git a/cmake/external/snappystream.cmake b/cmake/external/snappystream.cmake index 5377a0b046a796cd6f0bb1fb466e1cd0b4b678bf..20a96430823d07a07d4bb4602e7fc0cfe55c3bf2 100644 --- a/cmake/external/snappystream.cmake +++ b/cmake/external/snappystream.cmake @@ -11,9 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -IF(MOBILE_INFERENCE) +IF(MOBILE_INFERENCE OR RPI) return() ENDIF() @@ -21,9 +20,11 @@ include (ExternalProject) # NOTE: snappy is needed when linking with recordio -SET(SNAPPYSTREAM_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy_stream) -SET(SNAPPYSTREAM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy_stream) -SET(SNAPPYSTREAM_INCLUDE_DIR "${SNAPPYSTREAM_INSTALL_DIR}/include/" CACHE PATH "snappy stream include directory." 
FORCE) +set(SNAPPYSTREAM_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy_stream) +set(SNAPPYSTREAM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy_stream) +set(SNAPPYSTREAM_INCLUDE_DIR "${SNAPPYSTREAM_INSTALL_DIR}/include" CACHE PATH "snappy stream include directory." FORCE) + +set(SNAPPYSTREAM_LIBRARIES "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a") ExternalProject_Add( extern_snappystream @@ -51,8 +52,9 @@ ExternalProject_Add( ) add_library(snappystream STATIC IMPORTED GLOBAL) -set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION - "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a") +set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION ${SNAPPYSTREAM_LIBRARIES}) + +include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) # For snappysteam to include its own headers. +include_directories(${THIRD_PARTY_PATH}/install) # For Paddle to include snappy stream headers. -include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) add_dependencies(snappystream extern_snappystream) diff --git a/cmake/external/threadpool.cmake b/cmake/external/threadpool.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0159815fed81bdff6de3e561af569e9edc75f947 --- /dev/null +++ b/cmake/external/threadpool.cmake @@ -0,0 +1,30 @@ +INCLUDE(ExternalProject) + +SET(THREADPOOL_SOURCE_DIR ${THIRD_PARTY_PATH}/threadpool) +SET(THREADPOOL_INCLUDE_DIR ${THREADPOOL_SOURCE_DIR}/src/extern_threadpool) +INCLUDE_DIRECTORIES(${THREADPOOL_INCLUDE_DIR}) + +ExternalProject_Add( + extern_threadpool + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/progschj/ThreadPool.git" + GIT_TAG 9a42ec1329f259a5f4881a291db1dcb8f2ad9040 + PREFIX ${THREADPOOL_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +if (${CMAKE_VERSION} VERSION_LESS "3.3.0") + set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/threadpool_dummy.c) + file(WRITE ${dummyfile} "const char *dummy_threadpool = \"${dummyfile}\";") + add_library(simple_threadpool STATIC ${dummyfile}) +else() + add_library(simple_threadpool INTERFACE) +endif() + +add_dependencies(simple_threadpool extern_threadpool) + +LIST(APPEND external_project_dependencies simple_threadpool) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 9a9a20f897e09b823dfb19ff841c3f2aeb3f9fe6..a631ad14b18310598f7eea3a51839d61a9e456ff 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -62,7 +62,8 @@ ExternalProject_Add( ) MESSAGE(STATUS "warp-ctc library: ${WARPCTC_LIBRARIES}") -INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) # For warpctc code to include its headers. +INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include warpctc headers. ADD_LIBRARY(warpctc SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET warpctc PROPERTY IMPORTED_LOCATION ${WARPCTC_LIBRARIES}) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 20b8506e678af4db6ccb65bef99d28e085a67bf2..c3d73235453c8c9fd2859c3ab142888e8bda2dbe 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -25,7 +25,8 @@ ELSE(WIN32) SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE) ENDIF(WIN32) -INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) # For zlib code to include its own headers. +INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include zlib.h. 
ExternalProject_Add( extern_zlib diff --git a/cmake/generic.cmake b/cmake/generic.cmake index c749c97f13649fe8432091414b56f7d0ea8ace8b..1d3e2ade6d393c6e4c37eea0dc1064cdb18808a5 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -195,14 +195,7 @@ function(cc_library TARGET_NAME) list(REMOVE_ITEM cc_library_DEPS warpctc) add_dependencies(${TARGET_NAME} warpctc) endif() - if("${cc_library_DEPS}" MATCHES "ARCHIVE_START") - # Support linking flags: --whole-archive (Linux) / -force_load (MacOS). - # WARNING: Please don't use ARCHIVE_START&ARCHIVE_END if TARGET_NAME will be linked by other libraries. - target_circle_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) - list(REMOVE_ITEM cc_library_DEPS ARCHIVE_START ARCHIVE_END) - else() - target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) - endif() + target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) add_dependencies(${TARGET_NAME} ${cc_library_DEPS}) endif() @@ -243,15 +236,11 @@ function(cc_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) - # Support linking flags: --whole-archive (Linux) / -force_load (MacOS) - target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) - if("${cc_test_DEPS}" MATCHES "ARCHIVE_START") - list(REMOVE_ITEM cc_test_DEPS ARCHIVE_START ARCHIVE_END) - endif() - add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) + target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} ${cc_test_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction(cc_test) @@ -311,8 +300,8 @@ function(nv_test TARGET_NAME) set(multiValueArgs SRCS DEPS) cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) - add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) + target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_test(${TARGET_NAME} ${TARGET_NAME}) endif() endfunction(nv_test) @@ -387,8 +376,8 @@ function(hip_test TARGET_NAME) endif() add_executable(${TARGET_NAME} ${_cmake_options} ${_generated_files} ${_sources}) set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP) - target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) - add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) + add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) add_test(${TARGET_NAME} ${TARGET_NAME}) endif() endfunction(hip_test) @@ -561,9 +550,9 @@ function(py_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS ENVS) cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env 
PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_ENVS} + COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS} ${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction() @@ -587,6 +576,9 @@ function(grpc_library TARGET_NAME) get_filename_component(PROTO_WE ${grpc_library_PROTO} NAME_WE) get_filename_component(PROTO_PATH ${ABS_PROTO} PATH) + #FIXME(putcn): the follwoing line is supposed to generate *.pb.h and cc, but + # somehow it didn't. line 602 to 604 is to patching this. Leaving this here + # for now to enable dist CI. protobuf_generate_cpp(grpc_proto_srcs grpc_proto_hdrs "${ABS_PROTO}") set(grpc_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/${PROTO_WE}.grpc.pb.cc") set(grpc_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/${PROTO_WE}.grpc.pb.h") @@ -597,6 +589,9 @@ function(grpc_library TARGET_NAME) COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}" -I "${PROTO_PATH}" --plugin=protoc-gen-grpc="${GRPC_CPP_PLUGIN}" "${ABS_PROTO}" + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS --cpp_out "${CMAKE_CURRENT_BINARY_DIR}" -I "${PROTO_PATH}" + "${ABS_PROTO}" DEPENDS "${ABS_PROTO}" ${PROTOBUF_PROTOC_EXECUTABLE} extern_grpc) # FIXME(typhoonzero): grpc generated code do not generate virtual-dtor, mark it diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake index 0323cd9698cba916d2aa04403be97c0a6a463830..cc758019827b9a5416a801e4da43d754d4492a73 100644 --- a/cmake/inference_lib.cmake +++ b/cmake/inference_lib.cmake @@ -1,7 +1,22 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ set_property(GLOBAL PROPERTY FLUID_MODULES "") # find all fluid modules is used for paddle fluid static library function(find_fluid_modules TARGET_NAME) get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE) + string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path}) string(FIND "${__target_path}" "fluid" pos) if(pos GREATER 1) get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) @@ -77,6 +92,23 @@ elseif (WITH_MKLML) ) endif() +if(NOT MOBILE_INFERENCE AND NOT RPI) + set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappy") + copy(snappy_lib + SRCS ${SNAPPY_INCLUDE_DIR} ${SNAPPY_LIBRARIES} + DSTS ${dst_dir} ${dst_dir}/lib) + + set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappystream") + copy(snappystream_lib + SRCS ${SNAPPYSTREAM_INCLUDE_DIR} ${SNAPPYSTREAM_LIBRARIES} + DSTS ${dst_dir} ${dst_dir}/lib) + + set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/zlib") + copy(zlib_lib + SRCS ${ZLIB_INCLUDE_DIR} ${ZLIB_LIBRARIES} + DSTS ${dst_dir} ${dst_dir}/lib) +endif() + # paddle fluid module set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid") set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle/fluid") diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index a9b27933a5307aabeaf150aeb859e869197229f5..0f9521616952a2857222feab8c38fb480761ee2d 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -1,2 +1,11 @@ +add_custom_target(paddle_apis ALL + DEPENDS paddle_v2_apis paddle_fluid_apis) + +add_custom_target(paddle_docs ALL + DEPENDS paddle_v2_docs paddle_v2_docs_cn + paddle_fluid_docs paddle_fluid_docs_cn + paddle_mobile_docs paddle_mobile_docs_cn) + add_subdirectory(v2) add_subdirectory(fluid) +add_subdirectory(mobile) diff --git a/doc/design/file_manager/README.md b/doc/design/file_manager/README.md deleted file mode 100644 index 3df10d801e568834729f902aace483d033340e2d..0000000000000000000000000000000000000000 --- a/doc/design/file_manager/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# FileManager设计文档 -## 目标 -在本文档中,我们设计说明了名为FileManager系统,方便用户上传自己的训练数据以进行分布式训练 - -主要功能包括: - -- 提供常用的命令行管理命令管理文件和目录 -- 支持大文件的断点上传、下载 - -## 名词解释 -- PFS:是`Paddlepaddle cloud File System`的缩写,是对用户文件存储空间的抽象,与之相对的是local filesystem。目前我们用CephFS来搭建。 -- [CephFS](http://docs.ceph.com/docs/master/cephfs/):一个POSIX兼容的文件系统。 -- Chunk:逻辑划上文件分块的单位。 - -## 模块 -### 架构图 - - -### PFSClient -- 功能: 详细设计[link](./pfs/pfsclient.md) - - 提供用户管理文件的命令 - - 需要可以跨平台执行 - -- 双向验证 - PFSClient需要和Ingress之间做双向验证[tls](#tls),所以用户需要首先在`cloud.paddlepaddle.org`上注册一下,申请用户空间,并且把系统生成的CA(certificate authority)、Key、CRT(CA signed certificate)下载到本地,然后才能使用PFSClient。 - -### [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- 功能: - 提供七层协议的反向代理、基于粘性会话的负载均衡功能。 - -- 透传用户身份的办法 - Ingress需要把PFSClient的身份信息传给PFSServer,配置的方法参考[link](http://www.integralist.co.uk/posts/clientcertauth.html#3) - -### PFSServer -PFSServer提供RESTful API接口,接收处理PFSClient端的文件管理请求,并且把结果返回PFSClient端。 - -RESTful API - -- /api/v1/files - - `GET /api/v1/files`: Get metadata of files or directories. - - `POST /api/v1/files`: Create files or directories. - - `PATCH /api/v1/files`: Update files or directories. - - `DELETE /api/v1/files`: Delete files or directories. - -- /api/v1/file/chunks - - `GET /api/v1/storage/file/chunks`: Get chunks's metadata of a file. - -- /api/v1/storage/files - - `GET /api/v1/storage/files`: Download files or directories. - - `POST /api/v1/storage/files`: Upload files or directories. - -- /api/v1/storage/file/chunks - - `GET /api/v1/storage/file/chunks`: Download chunks's data. 
- - `POST /api/v1/storage/file/chunks`: Upload chunks's data. - -## 文件传输优化 - -### 分块文件传输 -用户文件可能是比较大的,上传到Cloud或者下载到本地的时间可能比较长,而且在传输的过程中也可能出现网络不稳定的情况。为了应对以上的问题,我们提出了Chunk的概念,一个Chunk由所在的文件偏移、数据、数据长度及校验值组成。文件的上传和下载都是通过对Chunk的操作来实现的。由于Chunk比较小(默认256K),完成一个传输动作完成的时间也比较短,不容易出错。PFSClient需要在传输完毕最后一个Chunk的时候检查destination文件的MD5值是否和source文件一致。 - -一个典型的Chunk如下所示: - -``` -type Chunk struct { - fileOffset int64 - checksum uint32 - len uint32 - data []byte -} -``` - -### 生成sparse文件 -当destination文件不存在或者大小和source文件不一致时,可以用[Fallocate](https://Go.org/pkg/syscall/#Fallocate)生成sparse文件,然后就可以并发写入多个Chunk。 - -### 覆盖不一致的部分 -文件传输的的关键在于需要PFSClient端对比source和destination的文件Chunks的checksum是否保持一致,不一致的由PFSClient下载或者传输Chunk完成。这样已经传输成功的部分就不用重新传输了。 - -## 用户使用流程 -参考[link](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/cluster_train/data_dispatch.md) - -## 框架生成 -用[swagger](https://github.com/swagger-api/swagger-codegen)生成PFSClient和PFSServer的框架部分,以便我们可以把更多的精力放到逻辑本身上。 - -## 参考文档 -- [TLS complete guide](https://github.com/k8sp/tls/blob/master/tls.md) -- [aws.s3](http://docs.aws.amazon.com/cli/latest/reference/s3/) -- [linux man document](https://linux.die.net/man/) diff --git a/doc/design/file_manager/pfs/pfsclient.md b/doc/design/file_manager/pfs/pfsclient.md deleted file mode 100644 index 56bc70c54bbc92b78d66e04fb495b1300cf8ebe0..0000000000000000000000000000000000000000 --- a/doc/design/file_manager/pfs/pfsclient.md +++ /dev/null @@ -1,129 +0,0 @@ -# PFSClient - -## Description -The `pfs` command is a Command Line Interface to manage your files on PaddlePaddle Cloud - -## Synopsis -``` -paddle [options] pfs [parameters] -``` - -## Options -``` ---profile (string) - Use a specific profile from your credential file. - ---help (string) - Display more information about command - ---version - Output version information and exit - ---debug - Show detailed debugging log - ---only-show-errors (boolean) - Only errors and warnings are displayed. All other output is suppressed. -``` - -## Path Arguments -When using a command, we need to specify path arguments. There are two path argument type: `localpath` and `pfspath`. - -A `pfspath` begin with `/pfs`, eg: `/pfs/$DATACENTER/home/$USER/folder`. - -[Here](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/cluster_train/data_dispatch.md#上传训练文件) is how to config datacenters. - -## order of Path Arguments -Commonly, if there are two path arguments, the first is the source, and the second is the destination. - -## Subcommonds -- rm - remove files or directories - -``` -Synopsis: - rm [-r] [-v] ... - -Options: - -r - Remove directories and their contents recursively - -v - Cause rm to be verbose, showing files after they are removed. - -Examples: - paddle pfs rm /pfs/$DATACENTER/home/$USER/file - paddle pfs rm -r /pfs/$DATACENTER/home/$USER/folder -``` -- mv - move (rename) files - -``` -Synopsis: - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - -Options: - -f - Do not prompt for confirmation before overwriting the destination path. (The -f option overrides previous -n options.) - -n - Do not overwrite an existing file. (The -n option overrides previous -f options.) - -v - Cause mv to be verbose, showing files after they are moved. - -Examples: - paddle pfs mv ./text1.txt /pfs/$DATACENTER/home/$USER/text1.txt -``` -- cp - copy files or directories - -``` -Synopsis: - cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... 
- cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... - cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... - -Options: - -r - Copy directories recursively - -f - Do not prompt for confirmation before overwriting the destination path. (The -f option overrides previous -n options.) - -n - Do not overwrite an existing file. (The -n option overrides previous -f options.) - -v - Cause cp to be verbose, showing files after they are copied. - --preserve--links - Reserve links when copy links - -Examples: - paddle pfs cp ./file /pfs/$DATACENTER/home/$USER/file - paddle pfs cp /pfs/$DATACENTER/home/$USER/file ./file -``` -- ls- list files - -``` -Synopsis: - ls [-r] ... - -Options: - -R - List directory(ies) recursively - -Examples: - paddle pfs ls /pfs/$DATACENTER/home/$USER/file - paddle pfs ls /pfs/$DATACENTER/home/$USER/folder -``` - -- mkdir - mkdir directory(ies) -Create intermediate directory(ies) as required. - -``` -Synopsis: - mkdir ... - -Examples: - paddle pfs mkdir /pfs/$DATACENTER/home/$USER/folder -``` diff --git a/doc/design/file_manager/src/filemanager.graffle b/doc/design/file_manager/src/filemanager.graffle deleted file mode 100644 index 7861a33072bc1908f69d12b37c20491dd8663103..0000000000000000000000000000000000000000 Binary files a/doc/design/file_manager/src/filemanager.graffle and /dev/null differ diff --git a/doc/design/file_manager/src/filemanager.png b/doc/design/file_manager/src/filemanager.png deleted file mode 100644 index 8139a19f5722f56d3c211f3ab0d3982f751134b9..0000000000000000000000000000000000000000 Binary files a/doc/design/file_manager/src/filemanager.png and /dev/null differ diff --git a/doc/fluid/CMakeLists.txt b/doc/fluid/CMakeLists.txt index cc999f5a8d70a2239ea3b130e9da172d5f681c65..8086507bb4b7e870ad6d6091945ed07a00b5100b 100644 --- a/doc/fluid/CMakeLists.txt +++ b/doc/fluid/CMakeLists.txt @@ -27,6 +27,8 @@ sphinx_add_target(paddle_fluid_docs ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) +add_dependencies(paddle_fluid_docs gen_proto_py paddle_python) + # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") @@ -47,3 +49,7 @@ sphinx_add_target(paddle_fluid_docs_cn ${SPHINX_CACHE_DIR_CN} ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) + +add_dependencies(paddle_fluid_docs_cn gen_proto_py paddle_python) + +add_subdirectory(api) diff --git a/doc/fluid/api/CMakeLists.txt b/doc/fluid/api/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..48b396f0786adad1ba6cd41f72497f853e54bc38 --- /dev/null +++ b/doc/fluid/api/CMakeLists.txt @@ -0,0 +1,22 @@ +# configured documentation tools and intermediate build results +set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") + +# HTML output director +set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../../templates/conf.py.en.in" + "${BINARY_BUILD_DIR_EN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_fluid_apis + html + ${BINARY_BUILD_DIR_EN} + ${SPHINX_CACHE_DIR_EN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_EN}) + +add_dependencies(paddle_fluid_apis gen_proto_py framework_py_proto copy_paddle_pybind paddle_python) diff --git a/doc/v2/api/fluid/data_feeder.rst b/doc/fluid/api/data_feeder.rst similarity index 100% rename from 
doc/v2/api/fluid/data_feeder.rst rename to doc/fluid/api/data_feeder.rst diff --git a/doc/v2/api/fluid/evaluator.rst b/doc/fluid/api/evaluator.rst similarity index 100% rename from doc/v2/api/fluid/evaluator.rst rename to doc/fluid/api/evaluator.rst diff --git a/doc/v2/api/fluid/executor.rst b/doc/fluid/api/executor.rst similarity index 100% rename from doc/v2/api/fluid/executor.rst rename to doc/fluid/api/executor.rst diff --git a/doc/v2/api/fluid/gen_doc.py b/doc/fluid/api/gen_doc.py similarity index 100% rename from doc/v2/api/fluid/gen_doc.py rename to doc/fluid/api/gen_doc.py diff --git a/doc/v2/api/fluid/gen_doc.sh b/doc/fluid/api/gen_doc.sh similarity index 100% rename from doc/v2/api/fluid/gen_doc.sh rename to doc/fluid/api/gen_doc.sh diff --git a/doc/v2/api/fluid/index.rst b/doc/fluid/api/index_en.rst similarity index 100% rename from doc/v2/api/fluid/index.rst rename to doc/fluid/api/index_en.rst diff --git a/doc/v2/api/fluid/initializer.rst b/doc/fluid/api/initializer.rst similarity index 100% rename from doc/v2/api/fluid/initializer.rst rename to doc/fluid/api/initializer.rst diff --git a/doc/v2/api/fluid/io.rst b/doc/fluid/api/io.rst similarity index 100% rename from doc/v2/api/fluid/io.rst rename to doc/fluid/api/io.rst diff --git a/doc/v2/api/fluid/layers.rst b/doc/fluid/api/layers.rst similarity index 98% rename from doc/v2/api/fluid/layers.rst rename to doc/fluid/api/layers.rst index ae35d8c53476b34cb18331364267dd7c8b94dd64..5c02886efd7d11e9520910526fb90ec01e123bae 100644 --- a/doc/v2/api/fluid/layers.rst +++ b/doc/fluid/api/layers.rst @@ -473,6 +473,12 @@ multiplex .. autofunction:: paddle.fluid.layers.multiplex :noindex: +label_smooth +------------ + +.. autofunction:: paddle.fluid.layers.label_smooth + :noindex: + ops === @@ -494,6 +500,12 @@ reshape .. autofunction:: paddle.fluid.layers.reshape :noindex: +pad +--- + +.. 
autofunction:: paddle.fluid.layers.pad + :noindex: + scale ----- diff --git a/doc/v2/api/fluid/nets.rst b/doc/fluid/api/nets.rst similarity index 100% rename from doc/v2/api/fluid/nets.rst rename to doc/fluid/api/nets.rst diff --git a/doc/v2/api/fluid/optimizer.rst b/doc/fluid/api/optimizer.rst similarity index 100% rename from doc/v2/api/fluid/optimizer.rst rename to doc/fluid/api/optimizer.rst diff --git a/doc/v2/api/fluid/param_attr.rst b/doc/fluid/api/param_attr.rst similarity index 100% rename from doc/v2/api/fluid/param_attr.rst rename to doc/fluid/api/param_attr.rst diff --git a/doc/v2/api/fluid/profiler.rst b/doc/fluid/api/profiler.rst similarity index 100% rename from doc/v2/api/fluid/profiler.rst rename to doc/fluid/api/profiler.rst diff --git a/doc/v2/api/fluid/regularizer.rst b/doc/fluid/api/regularizer.rst similarity index 100% rename from doc/v2/api/fluid/regularizer.rst rename to doc/fluid/api/regularizer.rst diff --git a/doc/fluid/build_and_install/build_from_source_cn.rst b/doc/fluid/build_and_install/build_from_source_cn.rst new file mode 120000 index 0000000000000000000000000000000000000000..ae4e8c7c48e584ec16a7be5466f83dd154ffb5fb --- /dev/null +++ b/doc/fluid/build_and_install/build_from_source_cn.rst @@ -0,0 +1 @@ +../../v2/build_and_install/build_from_source_cn.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/build_from_source_en.rst b/doc/fluid/build_and_install/build_from_source_en.rst new file mode 120000 index 0000000000000000000000000000000000000000..1ac828c973826bb8374c4aa8e17fda3ea1bb939f --- /dev/null +++ b/doc/fluid/build_and_install/build_from_source_en.rst @@ -0,0 +1 @@ +../../v2/build_and_install/build_from_source_en.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/docker_install_cn.rst b/doc/fluid/build_and_install/docker_install_cn.rst new file mode 120000 index 0000000000000000000000000000000000000000..965b2e20559291989422938c418fadbac16941b9 --- /dev/null +++ b/doc/fluid/build_and_install/docker_install_cn.rst @@ -0,0 +1 @@ +../../v2/build_and_install/docker_install_cn.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/docker_install_en.rst b/doc/fluid/build_and_install/docker_install_en.rst new file mode 120000 index 0000000000000000000000000000000000000000..79d7341a7bbb9e477c773134f24983fd7607769a --- /dev/null +++ b/doc/fluid/build_and_install/docker_install_en.rst @@ -0,0 +1 @@ +../../v2/build_and_install/docker_install_en.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/index_cn.rst b/doc/fluid/build_and_install/index_cn.rst deleted file mode 100644 index 9276236f9fd511bde3570a8c88b437119911d60a..0000000000000000000000000000000000000000 --- a/doc/fluid/build_and_install/index_cn.rst +++ /dev/null @@ -1,2 +0,0 @@ -安装与使用 ------------- diff --git a/doc/fluid/build_and_install/index_cn.rst b/doc/fluid/build_and_install/index_cn.rst new file mode 120000 index 0000000000000000000000000000000000000000..f697fcd8fac9131862ae7f8f51c5ebe93737ad2d --- /dev/null +++ b/doc/fluid/build_and_install/index_cn.rst @@ -0,0 +1 @@ +../../v2/build_and_install/index_cn.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/index_en.rst b/doc/fluid/build_and_install/index_en.rst deleted file mode 100644 index cc1e61a58a026a0f5c3b106875a8a86dc9cba613..0000000000000000000000000000000000000000 --- a/doc/fluid/build_and_install/index_en.rst +++ /dev/null @@ -1,2 +0,0 @@ -Build and Install ------------- diff --git a/doc/fluid/build_and_install/index_en.rst 
b/doc/fluid/build_and_install/index_en.rst new file mode 120000 index 0000000000000000000000000000000000000000..502f66a41319d4f41ae1774628ca36da9dca76ce --- /dev/null +++ b/doc/fluid/build_and_install/index_en.rst @@ -0,0 +1 @@ +../../v2/build_and_install/index_en.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/pip_install_cn.rst b/doc/fluid/build_and_install/pip_install_cn.rst new file mode 120000 index 0000000000000000000000000000000000000000..07deca84b82ff553e0c19324695089dcfb6be90e --- /dev/null +++ b/doc/fluid/build_and_install/pip_install_cn.rst @@ -0,0 +1 @@ +../../v2/build_and_install/pip_install_cn.rst \ No newline at end of file diff --git a/doc/fluid/build_and_install/pip_install_en.rst b/doc/fluid/build_and_install/pip_install_en.rst new file mode 120000 index 0000000000000000000000000000000000000000..7f39c998195b719b05443e96f1c4a6a8d44b98c9 --- /dev/null +++ b/doc/fluid/build_and_install/pip_install_en.rst @@ -0,0 +1 @@ +../../v2/build_and_install/pip_install_en.rst \ No newline at end of file diff --git a/doc/fluid/design/algorithm/index_cn.rst b/doc/fluid/design/algorithm/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..0883a9dc9c457f393ac1bdc930cb47ebcb0a25d9 --- /dev/null +++ b/doc/fluid/design/algorithm/index_cn.rst @@ -0,0 +1,7 @@ +梯度更新算法 +------------ + +.. toctree:: + :maxdepth: 1 + + parameter_average.md diff --git a/doc/fluid/design/algorithm/index_en.rst b/doc/fluid/design/algorithm/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..59fe68dcf79ce2ef90b9adc829a0db45a4f0b3dc --- /dev/null +++ b/doc/fluid/design/algorithm/index_en.rst @@ -0,0 +1,7 @@ +Gradient Update Algorithm +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + parameter_average.md diff --git a/doc/fluid/design/algorithm/parameter_average.md b/doc/fluid/design/algorithm/parameter_average.md index 2c4edee9fe31d502ea62b9fe5c8757c0a4c5e79f..940d37fb31dcd0c50ea6c4c42b052d7cb23a9c47 100644 --- a/doc/fluid/design/algorithm/parameter_average.md +++ b/doc/fluid/design/algorithm/parameter_average.md @@ -5,9 +5,11 @@ In a large scale machine learning setup where the size of the training data is h Polyak and Juditsky (1992) showed that the test performance of simple average of parameters obtained by Stochastic Gradient Descent (SGD) is as good as that of parameter values that are obtained by training the model over and over again, over the training dataset. -Hence, to accelerate the speed of Stochastic Gradient Descent, Averaged Stochastic Gradient Descent (ASGD) was proposed in Polyak and Juditsky (1992). For ASGD, the running average of parameters obtained by SGD, is used as the estimator for
[formula image]. The averaging is done as follows:
+Hence, to accelerate the speed of Stochastic Gradient Descent, Averaged Stochastic Gradient Descent (ASGD) was proposed in Polyak and Juditsky (1992). For ASGD, the running average of parameters obtained by SGD, is used as the estimator for [formula image]. The averaging is done as follows:

-[formula image: ASGD parameter averaging]
+[formula image: ASGD parameter averaging]
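The formula images in the original document do not survive in this text-only form. As a readable stand-in, the running average described by the surrounding prose can be written out; the following is a standard statement of Polyak-Ruppert / ASGD averaging (a reconstruction consistent with the text above, not the literal content of the embedded images):

```latex
% ASGD keeps a running mean of the SGD iterates \theta_k; after t steps the
% averaged estimate is (reconstructed form, not the original image):
\bar{\theta}_t = \frac{1}{t} \sum_{k=1}^{t} \theta_k
```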
We propose averaging for any optimizer similar to how ASGD performs it, as mentioned above. diff --git a/doc/fluid/design/concepts/README.md b/doc/fluid/design/concepts/README.md index bf0e4dddc1b640ecbce489f65820aaf8a4b3b1e7..8ded0ad22f4013a521bf3bee260565dc5cf855ae 100644 --- a/doc/fluid/design/concepts/README.md +++ b/doc/fluid/design/concepts/README.md @@ -2,15 +2,37 @@ A few months ago when we were trying to replace CMake with Bazel, @emailweixu su Here are some initial thoughts. Your comments are welcome! -### Required CMake Function +# Required CMake Function I think we need only the following few CMake functions to make a project description mean and clean: -| C++ | CUDA C++ | Go | -|---|---|---| -| cc_library | nv_library | go_library | -| cc_binary | nv_binary | go_binary | -| cc_test | nv_test | go_test | + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<thead>
+<tr><th>C++</th><th>CUDA C++</th><th>Go</th></tr>
+</thead>
+<tbody>
+<tr><td>cc_library</td><td>nv_library</td><td>go_library</td></tr>
+<tr><td>cc_binary</td><td>nv_binary</td><td>go_binary</td></tr>
+<tr><td>cc_test</td><td>nv_test</td><td>go_test</td></tr>
+</tbody>
+</table>
+ - The `_library` functions generate .a files from source code. - The `_binary` functions generate executable binary files. @@ -25,7 +47,7 @@ Also, - to describe external dependencies, we need `external_library`. - to build shared libraries, we need `shared_library`. -### An Example Project +## An Example Project Suppose that we have aforementioned functions defined in our `/cmake` directory. The following example `CMakeLists.txt` describes a project including the following source files: @@ -102,11 +124,11 @@ shared_library(api ``` -### Implementation +## Implementation As above example CMakeLists.txt executes, each function invocation adds "nodes" to a dependency graph. It also use this graph to generate CMake commands including `add_executable`, `add_dependencies`, `target_link_libraries`, and `add_test`. -### Using Package Manager For Go +## Using Package Manager For Go Building Go binaries and libraries need to satisfy their dependencies, generally we can do `go get ./...` to download and compile all external dependencies. The @@ -122,7 +144,7 @@ problems are: at many cloud file hosting, so users what to compile paddle by themselves can download this "vendor" package from a mirror site. -#### Choose A Suitable Tool +### Choose A Suitable Tool As mentioned by @wangkuiyi, [Here](https://github.com/golang/go/wiki/PackageManagementTools) list dozens of Go package managers. We choose the tool using following principles: @@ -140,7 +162,7 @@ management tool has been started at: https://github.com/golang/dep to resolve such problems, but it's currently at Alpha stage. So the best choice now is glide obviously. -#### Manage Go Packages +### Manage Go Packages - Dependencies: `go/glide.yaml` will store the dependencies and their versions which is directly imported by paddle. `go/glide.lock` will store all dependencies recursively diff --git a/doc/fluid/design/concepts/block.md b/doc/fluid/design/concepts/block.md index 907a2def557fd472ac4d679c73447bd9107d1190..3b626bd89cd83a9428997abccfeeebbbbdbb3d38 100644 --- a/doc/fluid/design/concepts/block.md +++ b/doc/fluid/design/concepts/block.md @@ -14,11 +14,29 @@ In programming languages, a block is a pair of curly braces that includes local Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning: -| programming languages | PaddlePaddle | -|-----------------------|-----------------------| -| for, while loop | RNN, WhileOp | -| if, if-else, switch | IfElseOp, SwitchOp | -| sequential execution | a sequence of layers | + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<thead>
+<tr><th>programming languages</th><th>PaddlePaddle</th></tr>
+</thead>
+<tbody>
+<tr><td>for, while loop</td><td>RNN, WhileOp</td></tr>
+<tr><td>if, if-else, switch</td><td>IfElseOp, SwitchOp</td></tr>
+<tr><td>sequential execution</td><td>a sequence of layers</td></tr>
+</tbody>
+</table>
+ A key difference is that a C++ program describes a one pass computation, whereas a deep learning program describes both the forward and backward passes. @@ -26,12 +44,33 @@ A key difference is that a C++ program describes a one pass computation, whereas The existence of the backward pass makes the execution of a block of PaddlePaddle different from traditional programs: -| programming languages | PaddlePaddle | -|-----------------------|---------------------------------| -| stack | scope hierarchy | -| stack frame | scope | -| push at entering block| push at entering block | -| pop at leaving block | destroy when minibatch completes| + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<thead>
+<tr><th>programming languages</th><th>PaddlePaddle</th></tr>
+</thead>
+<tbody>
+<tr><td>stack</td><td>scope hierarchy</td></tr>
+<tr><td>stack frame</td><td>scope</td></tr>
+<tr><td>push at entering block</td><td>push at entering block</td></tr>
+<tr><td>pop at leaving block</td><td>destroy when minibatch completes</td></tr>
+</tbody>
+</table>
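Before the point-by-point comparison that follows, here is a small runnable sketch (illustrative only, not PaddlePaddle code; the names are made up) of how a scope hierarchy plays the role the table assigns to the call stack:

```python
# Illustrative sketch: lookups fall back to the parent scope, a local scope is
# created when a block is entered and dropped when the mini-batch completes.
class Scope:
    def __init__(self, parent=None):
        self.parent = parent
        self.vars = {}

    def var(self, name, value):
        self.vars[name] = value
        return value

    def find(self, name):
        if name in self.vars:
            return self.vars[name]
        return self.parent.find(name) if self.parent else None

global_scope = Scope()
global_scope.var("W", "parameter")
local_scope = Scope(parent=global_scope)   # "push at entering block"
local_scope.var("hidden", "activation")
print(local_scope.find("W"))               # resolved through the parent scope
del local_scope                            # "destroy when minibatch completes"
```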
+ 1. In traditional programs: diff --git a/doc/fluid/design/concepts/cpp_data_feeding.md b/doc/fluid/design/concepts/cpp_data_feeding.md index 8607b40ccbbe01db77afed72c1efa780b520744c..aabc1ba75a67c5767d409bd6e7e6240dec86b16c 100644 --- a/doc/fluid/design/concepts/cpp_data_feeding.md +++ b/doc/fluid/design/concepts/cpp_data_feeding.md @@ -113,7 +113,7 @@ To solve this problem, we introduce `ReaderHolder` as a wrapper. It acts as an e To create and invoke readers, some new ops are introduced: -### CreateReaderOp +### Operators That Create Readers Each reader has its creation op. File readers' creation ops have no input and yield the created file reader as its output. Decorated readers' creation ops take the underlying readers as inputs and then yield new decorated readers. @@ -153,19 +153,52 @@ double_buffer_reader = create_double_buffer_op(batch_reader) The forwarding ops of the corresponding `main_program` would be like this: ``` -while_op { +not_completed = true +pass_count = 0 +while_op(not_completed) { has_next = has_next_op(double_buffer_reader) if_else_op(has_next) { batch_data = read_op(double_buffer_reader) ... (subsequent training ops) } else { reset_op(double_buffer_reader) + increase_op(pass_count) + not_completed = less_than_op(pass_count, reqiured_pass_num) } } ``` -Two important considerations for these programs are as follows: +A few important considerations for these programs are as follows: -1. The multiple\_reader is the batch\_reader's underlying reader, and the batch\_reader is the double\_buffer\_reader's underlying reader. `read_op`, `has_next_op` and other reader related ops will only invoke the top-most reader. In this case, it's the double\_buffer\_reader. +1. `not_completed`, `pass_count` and other variables shown above are all Fluid Variables. -2. All readers exist in both `startup_program` and `main_program`. And they are persistable. +2. The multiple\_reader is the batch\_reader's underlying reader, and the batch\_reader is the double\_buffer\_reader's underlying reader. `read_op`, `has_next_op` and other reader related ops will only invoke the top-most reader. In this case, it's the double\_buffer\_reader. + +3. All readers exist in both `startup_program` and `main_program`. And they are persistable. + +### Simplify Configuration by MultiPassReader + +The Program configuration mentioned above is complicated. Users need to be very familiar to concepts of Program and Block to prevent making mistakes in their code. To make the usage of C++ readers more friendly to new users, we introduce `MultiPassReader`. + +`MultiPassReader` is a decorated reader. A multi-pass reader is used to continuously yield data for several training passes. It takes the number of passes to run as one of its attributes('pass_num') and maintains a counter to record how many passes it has completed. Each time its underlying reader reaches the EOF, the multi-pass reader checks whether it has completed the training of given number of pass. If not, the underlying reader will be re-initialized and starts a new pass automatically. Before completing the whole training, the return of MultiPassReader's `HasNext()` will always be `true`. + +With `MultiPassReader`, the startup program would be like this: + +``` +multiple_reader = open_files_op(...) +batch_reader = create_batch_reader_op(multiple_reader) +multi_pass_reader = create_multi_pass_reader_op(batch_reader) +double_buffer_reader = create_double_buffer_op(multi_pass_reader) +... 
(other initializers) +``` + +The forwarding part of the corresponding `main_program` would be like this: + +``` +not_completed = true +while_op(not_completed) { + batch_data = read_op(double_buffer_reader) + ... (subsequent training ops) + not_completed = has_next_op(double_buffer_reader) +} +``` diff --git a/doc/fluid/design/concepts/functions_operators_layers.md b/doc/fluid/design/concepts/functions_operators_layers.md index 984b59f4c6971dfb6f46dfe342f2751f392c0e88..30bc488a18a28d349645d9d2502aae6691a69931 100644 --- a/doc/fluid/design/concepts/functions_operators_layers.md +++ b/doc/fluid/design/concepts/functions_operators_layers.md @@ -86,12 +86,40 @@ def layer.fc(X): We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example: - -| C++ functions/functors | mul | add | | | -|------------------------|--------------|--------------|-------------|----------| -| C++ operator class | mulOp | addOp | FCOp | | -| Python binding | operator.mul | operator.add | operator.fc | | -| Python function | | | | layer.fc | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<thead>
+<tr><th>C++ functions/functors</th><th>mul</th><th>add</th><th></th><th></th></tr>
+</thead>
+<tbody>
+<tr><td>C++ operator class</td><td>mulOp</td><td>addOp</td><td>FCOp</td><td></td></tr>
+<tr><td>Python binding</td><td>operator.mul</td><td>operator.add</td><td>operator.fc</td><td></td></tr>
+<tr><td>Python function</td><td></td><td></td><td></td><td>layer.fc</td></tr>
+</tbody>
+</table>
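To make the mapping above concrete, here is a minimal runnable sketch (hypothetical names standing in for the bindings in the table, not the real Fluid API) of a layer as a Python composition of operator-level functions:

```python
import numpy as np

# Stand-ins for the "Python binding" row; in this sketch they are plain NumPy calls.
class operator:
    mul = staticmethod(np.matmul)   # plays the role of the C++ mulOp binding
    add = staticmethod(np.add)      # plays the role of the C++ addOp binding

def fc_layer(X, W, b):
    # A "layer" (the Python function row) is just a composition of operators.
    return operator.add(operator.mul(X, W), b)

X, W, b = np.ones((2, 3)), np.ones((3, 4)), np.zeros(4)
print(fc_layer(X, W, b).shape)      # (2, 4)
```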
This is how we differentiate layer and operators in PaddlePaddle: diff --git a/doc/fluid/design/concepts/images/parallel_executor_overview.dot b/doc/fluid/design/concepts/images/parallel_executor_overview.dot new file mode 100644 index 0000000000000000000000000000000000000000..40753cb140540c08d9d4c449b8d377e315280436 --- /dev/null +++ b/doc/fluid/design/concepts/images/parallel_executor_overview.dot @@ -0,0 +1,83 @@ +digraph G { + subgraph cluster_init { + label="Initialization" + startup_program [label="startup", shape=box] + node_w_g0 [label="W\nGPU0"] + startup_program -> node_w_g0 [label="Initialize"] + node_w_g1 [label="W\nGPU1"] + node_w_g0 -> node_w_g1 [label="broadcast"] + } + + subgraph cluster_train { + label="forward_backward" + + subgraph cluster_gpu0 { + label="GPU0" + fc_0 [label="fc\nGPU0", shape=box] + hidden_0 [label="hidden\nGPU0"] + node_w_g0 -> fc_0 + fc_0 -> hidden_0 + loss0 [label="loss\nGPU0"] + hidden_0 -> loss0 [label="many ops omitted"] + scale_loss_0 [label="scale_loss_gradient\nGPU0", shape=box] + loss_g0 [label="loss_grad\nGPU0"] + scale_loss_0->loss_g0 + + fc_g_0 [label="w_grad\nGPU0", shape=box] + loss0 -> fc_g_0 + loss_g0 -> fc_g_0 + hidden_0 -> fc_g_0 + } + + subgraph cluster_gpu1 { + label="GPU1" + fc_1 [label="fc\nGPU1", shape=box] + hidden_1 [label="hidden\nGPU1"] + node_w_g1 -> fc_1 + fc_1 -> hidden_1 + loss1 [label="loss\nGPU1"] + hidden_1 -> loss1 [label="many ops omitted"] + scale_loss_1 [label="scale_loss_gradient\nGPU1", shape=box] + loss_g1 [label="loss_grad\nGPU1"] + scale_loss_1->loss_g1 + + fc_g_1 [label="w_grad\nGPU1", shape=box] + loss1 -> fc_g_1 + loss_g1 -> fc_g_1 + hidden_1 -> fc_g_1 + } + } + + all_reduce_w [label="Merge Gradients(AllReduce)", shape=box] + fc_g_0 -> all_reduce_w + fc_g_1 -> all_reduce_w + + fc_g_0_merged [label="w_grad\nMerged\nGPU0"] + fc_g_1_merged [label="w_grad\nMerged\nGPU1"] + all_reduce_w -> fc_g_0_merged + all_reduce_w -> fc_g_1_merged + + subgraph cluster_optimization { + label="Optimization" + subgraph cluster_opt_gpu0 { + label="GPU0" + sgd_0 [label="SGD Op\nGPU0", shape=box] + + fc_g_0_merged -> sgd_0 + node_w_g0 -> sgd_0 + optimized_w_0 [label="Optimized W\nGPU0"] + sgd_0 -> optimized_w_0 + } + subgraph cluster_opt_gpu1 { + label="GPU1" + sgd_1 [label="SGD Op\nGPU1", shape=box] + + fc_g_1_merged -> sgd_1 + node_w_g1 -> sgd_1 + optimized_w_1 [label="Optimized W\nGPU0"] + sgd_1 -> optimized_w_1 + } + } + + +} diff --git a/doc/fluid/design/concepts/images/parallel_executor_overview.png b/doc/fluid/design/concepts/images/parallel_executor_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..d890c0ffee3b38dc7cb74a2b56c2ab4831532211 Binary files /dev/null and b/doc/fluid/design/concepts/images/parallel_executor_overview.png differ diff --git a/doc/fluid/design/concepts/index_cn.rst b/doc/fluid/design/concepts/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..dcdc894937ff328e6002623275ca3c65e87b2bb0 --- /dev/null +++ b/doc/fluid/design/concepts/index_cn.rst @@ -0,0 +1,19 @@ +核心概念 +------------- + +.. 
toctree:: + :maxdepth: 1 + + README.md + cpp_data_feeding.md + functions_operators_layers.md + program.md + variable.md + var_desc.md + tensor.md + tensor_array.md + lod_tensor.md + block.md + scope.md + executor.md + parallel_executor.md diff --git a/doc/fluid/design/concepts/index_en.rst b/doc/fluid/design/concepts/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b85a3055746facaa642e8fc899976b58435f1ef2 --- /dev/null +++ b/doc/fluid/design/concepts/index_en.rst @@ -0,0 +1,19 @@ +Core Concepts +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + README.md + cpp_data_feeding.md + functions_operators_layers.md + program.md + variable.md + var_desc.md + tensor.md + tensor_array.md + lod_tensor.md + block.md + scope.md + executor.md + parallel_executor.md diff --git a/doc/fluid/design/concepts/lod_tensor.md b/doc/fluid/design/concepts/lod_tensor.md index 10a8a7867fbf072f585fe3bfb1243e4e6bef4ec8..a88292e7888d0ebc64ee89ca315dfea38a12c71d 100644 --- a/doc/fluid/design/concepts/lod_tensor.md +++ b/doc/fluid/design/concepts/lod_tensor.md @@ -2,12 +2,38 @@ Like other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represent a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require all sequences in a mini-batch to be of the same length. Thus no need for padding zeros. -| | TensorFlow | PaddlePaddle | -|-----------------------|------------|--------------| -| RNN | Support | Support | -| recursive RNN | Support | Support | -| padding zeros | Must | No need | -| blob data type | Tensor | LoDTensor | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<thead>
+<tr><th></th><th>TensorFlow</th><th>PaddlePaddle</th></tr>
+</thead>
+<tbody>
+<tr><td>RNN</td><td>Support</td><td>Support</td></tr>
+<tr><td>recursive RNN</td><td>Support</td><td>Support</td></tr>
+<tr><td>padding zeros</td><td>Must</td><td>No need</td></tr>
+<tr><td>blob data type</td><td>Tensor</td><td>LoDTensor</td></tr>
+</tbody>
+</table>
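As a concrete illustration of the "padding zeros: No need" row (assumed toy data, not taken from this patch), a mini-batch of variable-length sequences can be stored back to back together with an offset index instead of being padded:

```python
import numpy as np

# Three sequences of lengths 3, 1 and 2 share one tensor; the offset index
# records where each sequence starts and ends (toy example).
data = np.array([[1.0], [2.0], [3.0],   # sequence 0 (length 3)
                 [4.0],                  # sequence 1 (length 1)
                 [5.0], [6.0]])          # sequence 2 (length 2)
offsets = [0, 3, 4, 6]                   # sequence i occupies rows offsets[i]:offsets[i+1]

for i in range(len(offsets) - 1):
    print("sequence %d:" % i, data[offsets[i]:offsets[i + 1]].ravel())
```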
+ PaddlePaddle achieves this flexibility by passing through a new data type, *LoD Tensor*, which is a Tensor attached with segmentation index known as *LoD*, between operators. The LoD index doesn't only segment a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. diff --git a/doc/fluid/design/concepts/parallel_executor.md b/doc/fluid/design/concepts/parallel_executor.md new file mode 100644 index 0000000000000000000000000000000000000000..4f88e27bed722e9f2f535e368926fe49b4e72e56 --- /dev/null +++ b/doc/fluid/design/concepts/parallel_executor.md @@ -0,0 +1,104 @@ +# ParallelExecutor + +## Background + +Neural network models are defined as a `ProgramDesc` in Fluid. The `ProgramDesc` can be executed by an interpreter(i.e. the `executor` concept in Fluid). The instructions or operators in a `Program` will be executed, and the results will be fetched in Python side. + +The executor is a very naive interpreter. It runs operators one by one. We can use `Parallel.Do` to support data parallelism, however, lacking device information in `ProgramDesc`; it is not possible to optimize the performance of `Parallel.Do`. + +We want a `ProgramDesc` can be run on different nodes. It is better not to contain device information in `ProgramDesc`. However, we can write a high-performance interpreter, which can hold an alternative intermediate representation of `ProgramDesc`, to take full usage of Multi-GPUs. + +ParallelExecutor is an interpreter of `ProgramDesc` which will [out-of-order execute](https://en.wikipedia.org/wiki/Out-of-order_execution) `Program` in data parallelism mode and maximise the utility of Multi-GPUs. + + +## Overview of MultiGPUs logic + +The ParallelExecutor takes the startup program and main program as inputs. The parameters will be initialised on `GPU0` by startup program and will broadcast to multi-GPUs. The main program will be duplicated into multi-GPUs. The gradient will be merged during each iteration, and each device will optimize parameters independently. Since the gradients on each device will be merged before parameter optimization, the parameters will be the same on each device and it does not need to be broadcast the parameters. + +![alt](images/parallel_executor_overview.png) + +There are several optimizations for this logic. + +1. We use an alternate representation in ParallelExecutor. It because the device information is critical for performance optimization. +2. The execution is out-of-order, i.e., an operator will be executed whenever the inputs of the operator are ready. + * GPU is a high-performance device; only one CPU thread cannot fulfil one GPU. So there is a thread pool to execute operators. + * Out-of-order also helps transpilers to generate `ProgramDesc`. It is no need to concern about the best order of performance when implementing a transpiler. +3. The streams of computation, merge gradients and fetch data are different. + +The performance of `ResNeXt152` on `TitanX` which `batch_size=12` is shown below. + +| Number of GPUs | 1 | 2 | 3 | 4| +| --- | --- | --- | --- | --- | +| Image/Sec | 17.9906 | 25.771 | 36.911 | 48.8428 | +| Speed Up | N/A | 1.43247029 | 2.05168255 | 2.71490667 | + + +## Static single assignment Graph + +[Static single assignment form](https://en.wikipedia.org/wiki/Static_single_assignment_form)(`SSA` for short) is a common form for compiler optimization. To implement concurrent execution, we uses an `SSA` graph as an intermedia representation of `ProgramDesc`. 
+ +The `Program` is a directed acyclic graph, since a variable can be assigned multiple times. We enforce a variable will be assigned once, by adding version number to varaibles. We parsing the `Program` into a `SSA` graph. Also, ProgramExecutor duplicate `Program` into multi-devices. We also add a device number to varaibles and insert `NCCLAllReduce` into Graph. + +The data structure of `SSA` graph is: + +```c++ +struct VarHandleBase { + OpHandleBase* generated_op_; + vector pending_ops_; + + string name; + Place place; + size_t version; +}; + +struct OpHandleBase { + vector inputs_; + vector outputs_; +}; + +struct SSAGraph { + // vars on each devices. + // * the vars in each map in vector is on different device. + // * the map is mapping a variable name to variable handles + // with different versions + vector>> vars_; + + // All ops + vector ops_; +}; +``` +The variable handles are the wrapper of `Variables`. The operator handles are the wrapper of `OperatorBase`. Some `OpHandle` is not an `OperatorBase`, such as `NCCLAllReduceOpHandle`, because `AllReduceOpHandle` will use new device contexts. + +When the `ProgramDesc` converted into an `SSA` Graph, the [data hazard](https://en.wikipedia.org/wiki/Hazard_(computer_architecture)) problem is also need to be taken care. The dummy variables, which represent the dependency between operators, will be manually inserted into SSA graph to resolve the [data hazard](https://en.wikipedia.org/wiki/Hazard_(computer_architecture)) problem. + +## Execute SSA Graph + +The SSA graph can be out-of-order executed by an approximate [topological sorting](https://en.wikipedia.org/wiki/Topological_sorting) algorithm. The algorithm is + +1. Maintaining a map of an operator and its needed input number. +2. If a variable is not generated by an operator, i.e., `var.generated_op == nullptr`, decrease the needed input number of its pending operators. +3. If there is an operator which needed input number is decreased to zero, just run this operator. +4. After run this operator, just mark the variables are generated and repeat step 2 until all variables are generated. + +Running an operator can be asynchronized. There is a thread pool to execute an `SSA` graph. + +## Synchronize GPU Kernels + +The GPU is a non-blocking device. The different streams need be synchronized when switching streams. In current implementation, the synchronization based on the following algorithm: + +1. `OpHandle` will record `DeviceContext` that it is used. +2. In `OpHandle::Run`, if the `DeviceContext` of current operator is different from `DeviceContext` of any input variable, just wait the generate operator of this input variable. + +The `wait` are implemented by two strategies: + +1. Invoke `DeviceContext->Wait()`, It will wait all operators on this device contexts complete. +2. Uses `cudaStreamWaitEvent` to sending a event to the stream. It is a non-blocking call. The wait operators will be executed in GPU. + +Generally, the `cudaStreamWaitEvent` will have a better perforamnce. However, `DeviceContext->Wait()` strategy is easier to debug. The strategy can be changed in runtime. + +## What's next? + +* Merging gradient of dense parameters has been done. However, the merging of sparse parameters has not been done. +* The CPU version of Parallel Executor has not been implemented. The out-of-order logic will make CPU compuatation faster, too. +* A better strategy to merge gradients can be introduced. We can shrink the gradients from `float32` to `int8` or `int4` while merging. 
This will significantly speed up multi-GPU training without much loss of precision. +* Combine with the multi-node implementation. Thanks to out-of-order execution, the send and receive operators can be blocking operators, and the transpiler does not need to worry about the best position of an operator. diff --git a/doc/fluid/design/concepts/scope.md b/doc/fluid/design/concepts/scope.md index 4da76eebb74abcd26ec2b8671399e6bc4fb58574..dcf76649357aaef80d6bc1a933ece8c4c1063547 100644 --- a/doc/fluid/design/concepts/scope.md +++ b/doc/fluid/design/concepts/scope.md @@ -30,7 +30,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`. Variable can not belong to many scopes. If you want to use variables from parent scope, you can use `parent scope`. -1. Scope should destruct all Variables inside it when itself is destructed. User can never store `Variable` pointer somewhere else. +1. Scope should destruct all Variables inside it when itself is destructed. User can never store `Variable` pointer somewhere else, because a Variable can only be obtained from a Scope. When destroying a Scope, we also need to destroy all the Variables in it. If a user stores a `Variable` pointer in a private data member or some global variable, the pointer will become invalid when the associated `Scope` is destroyed. @@ -78,7 +78,7 @@ In `Scope` class, there is a private data member called `parent_`. `parent_` is A local scope is very useful when we implement Recurrent Neural Network. Each timestep of an RNN should be a `Net`. Each `Net` of timestep (`StepNet` for short) should use an independent local scope. Just like variables in a while loop are inside a local scope in programming languages. By using a single `StepNet` and changing local scope, we can implement an RNN easily. -# Interface Design +## Interface Design ```cpp class Variable { diff --git a/doc/fluid/design/concepts/var_desc.md b/doc/fluid/design/concepts/var_desc.md index 6a45af1995463402ba9c65ddb51c6c8bb107f99e..6750323c0167bf1efbde6ef4fd670e88a5aa502a 100644 --- a/doc/fluid/design/concepts/var_desc.md +++ b/doc/fluid/design/concepts/var_desc.md @@ -1,3 +1,5 @@ +# Design Doc: Var_desc + ## Background PaddlePaddle divides the description of neural network computation into two stages: compile time and runtime. At compile time, the neural network computation is described as a `ProgramDesc` whereas at runtime an `Executor` interprets the `ProgramDesc` to compute the operations. @@ -8,10 +10,27 @@ PaddlePaddle uses proto message to describe compile time program because : The computation `Program` consists of nested `Blocks`. Each `Block` will consist of data(i.e. `Variable`) and `Operations`. The concept to represent them is in the table below. -| |compile time|runtime| -|---|---|---| -|Data|VarDesc(proto)|Variable(cpp)| -|Operation|OpDesc(proto)|Operator(cpp)| +<table> +<thead> +<tr>
+<th></th> +<th>compile time</th> +<th>runtime</th> +</tr> +</thead> +<tbody>
+<tr> +<td>Data</td> +<td>VarDesc(proto)</td> +<td>Variable(cpp)</td> +</tr>
+<tr> +<td>Operation</td> +<td>OpDesc(proto)</td> +<td>Operator(cpp)</td> +</tr>
+</tbody> +</table> + ## Definition of VarType diff --git a/doc/fluid/design/concurrent/channel.md b/doc/fluid/design/concurrent/channel.md new file mode 100644 index 0000000000000000000000000000000000000000..df67438bcc741ac521b00ee962fc13c93db21182 --- /dev/null +++ b/doc/fluid/design/concurrent/channel.md @@ -0,0 +1,139 @@ +# Channel Design + +## Introduction + +A Channel is a data structure that allows for synchronous interprocess communication via message passing. It is a fundamental component of CSP (communicating sequential processes), and allows users to pass data between threads without having to worry about synchronization. + +## How to use it + +Paddle offers Python APIs to open and close channels, along with sending and receiving data to/from a channel. + +### Create a channel + +Creates a new channel that takes in variables of a specific dtype. + +- **fluid.make_channel(dtype, capacity=0)** + - **dtype**: The data type of variables being sent/received through the channel + - **capacity**: The capacity of the channel. A capacity of 0 represents an unbuffered channel. Capacity > 0 represents a buffered channel + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=10) +``` + +### Close a channel + +Closes a channel. Any pending senders and receivers will be awoken during this time. Receivers can still receive from a closed channel, but senders are not allowed to send any additional data to the channel (Paddle will raise an exception if users try to send to a closed channel.) + +- **fluid.channel_close(channel)** + +``` +fluid.channel_close(ch) +``` + +### Send data to a channel + +Sends a variable to a channel. Currently, variables of dtype `LoDTensor`, `LoDRankTable`, `LoDTensorArray`, `SelectedRows`, `ReaderHolder`, and `ChannelHolder` are supported. + +By default, the data of the Variable is moved from the sender to the receiver; however, the user can optionally copy the data before performing the send. + +- **channel_send(channel, variable, is_copy=False)** + - **channel**: The channel to send the variable to + - **variable**: The variable to send to the channel + - **is_copy**: If set to True, channel_send will perform a variable assign to copy the source variable to a new variable to be sent. + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +var = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=100) +fluid.channel_send(ch, var, True) +``` + +### Receive data from a channel + +Receives a variable from a channel. The data of the variable is moved to the receiving variable. + +- **channel_recv(channel, return_variable)** + - **channel**: The channel to receive the variable from + - **return_variable**: The destination variable used to store the data of the variable received from the channel + +``` +ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +var = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=-1) +fluid.channel_recv(ch, var) +``` + +## How it Works + +Channels provide a simple interface for different threads to share data. To support the synchronization requirements, channels utilize a series of internal queues, locks, and condition variables. + +### QueueMessage + +QueueMessage encapsulates the state of a channel send/receive operation to be put in the **sendq/recvq**. It contains a condition variable used to block the thread (when there are no available sends/receives).
In addition, it contains a callback function to notify a thread when the QueueMessage is being processed by the channel. + +### Queues + +- **buff_**: This queue holds the data buffer in a buffered channel. Its capacity is set to the capacity of the channel. This data buffer is not used in an unbuffered channel. + +- **sendq**: This queue holds the QueueMessages of any pending senders of a channel. When a thread performs a channel_send operation on the channel, the channel_send operation will put a new QueueMessage on the sendq and block the current thread under two conditions: 1. The channel is buffered and is full 2. The channel is unbuffered and does not have a receiver + +- **recvq**: This queue holds the QueueMessages of any pending receivers of a channel. When a thread performs a channel_recv operation on the channel, the channel_recv operation will put a new QueueMessage on the recvq and block the current thread under two conditions: 1. The channel is buffered and there is no data in buff_ 2. The channel is unbuffered and does not have a sender + +### State diagram + +#### Channel Send + +<p align="center"> +<img src="./images/channel_send.png"/> +</p> + +#### Channel Receive + +<p align="center"> +<img src="./images/channel_recv.png"/> +</p>
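The state diagrams above can be summarized in code. Below is a minimal Python sketch (a simplified model for illustration, not Paddle's C++ implementation) of a buffered channel built from the `buff_` data buffer and the blocking behavior described in the Queues section; a single condition variable stands in for the `sendq`/`recvq` bookkeeping.

```python
import threading
from collections import deque

class BufferedChannel:
    """Simplified buffered channel; the unbuffered rendezvous case is not modeled."""

    def __init__(self, capacity):
        assert capacity > 0
        self.capacity = capacity
        self.buff_ = deque()               # data buffer, as in the design above
        self.closed = False
        self.cond = threading.Condition()  # stands in for sendq/recvq waits

    def send(self, value):
        with self.cond:
            while len(self.buff_) >= self.capacity and not self.closed:
                self.cond.wait()           # block: buffered channel is full
            if self.closed:
                raise RuntimeError("cannot send to a closed channel")
            self.buff_.append(value)
            self.cond.notify_all()         # wake any blocked receiver

    def recv(self):
        with self.cond:
            while not self.buff_ and not self.closed:
                self.cond.wait()           # block: no data in buff_
            if self.buff_:
                value = self.buff_.popleft()
                self.cond.notify_all()     # wake any blocked sender
                return value
            return None                    # channel closed and drained

    def close(self):
        with self.cond:
            self.closed = True
            self.cond.notify_all()         # wake all pending senders/receivers

ch = BufferedChannel(capacity=2)
t = threading.Thread(target=lambda: [ch.send(i) for i in range(5)])
t.start()
print([ch.recv() for _ in range(5)])       # [0, 1, 2, 3, 4]
t.join()
ch.close()
```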
+ +## Limitations and Considerations + +### Variable Copy + +In golang, variables in channels are copied from the sender to the receiver. In Paddle, the data from our variables are **moved** from sender to receiver. As a result, these variables should not be used after they are sent. We provide a flag in the channel_send method to allow users to copy the variable to be sent before it is sent. + +Please note that this is achieved by adding an **assign** operator and creating a temporary variable that is sent in place of the original variable. Also note that the **assign** operator has limited support for only certain variable data types. diff --git a/doc/fluid/design/concurrent/concurrent_programming.md b/doc/fluid/design/concurrent/concurrent_programming.md index f022e67fd3a048cd7e53c91d9a1fd0506487b665..1859f983e9133674e69ecd506d7683ea926b2b8f 100644 --- a/doc/fluid/design/concurrent/concurrent_programming.md +++ b/doc/fluid/design/concurrent/concurrent_programming.md @@ -10,12 +10,42 @@ The answer relies on the fact that a `ProgramDesc` is similar to an abstract syn The following table compares concepts in Fluid and Go -| Go | Fluid | -|----|-------| -|user-defined functions | [layers](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid) | -| control-flow and built-in functions | [intrinsics/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) | -| goroutines, channels | [class ThreadPool](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework/thread_pool.h) | -| runtime | [class Executor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h) | +<table> +<thead> +<tr>
+<th>Go</th> +<th>Fluid</th> +</tr> +</thead> +<tbody>
+<tr> +<td>user-defined functions</td> +<td><a href="https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid">layers</a></td> +</tr>
+<tr> +<td>control-flow and built-in functions</td> +<td><a href="https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators">intrinsics/operators</a></td> +</tr>
+<tr> +<td>goroutines, channels</td> +<td><a href="https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework/thread_pool.h">class ThreadPool</a></td> +</tr>
+<tr> +<td>runtime</td> +<td><a href="https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h">class Executor</a></td> +</tr>
+</tbody> +</table> + ## An Example Concurrent Program + @@ -77,11 +107,11 @@ message ProgramDesc { read(output = X) kube_get_workers_addrs(output = L) Y = tensor_array(len(L)) - parallel_for(input = X, output = Y, + parallel_for(input = X, output = Y, attrs = {L, block_id(1)}) # referring to block 1 ] } - + block[1] = Block { parent = 0, vars = [x, y, index], @@ -102,7 +132,7 @@ func main() { //// block 0 X = fluid.read(...) L = fluid.k8s.get_worker_addrs() Y = fluid.tensor_array(len(L)) - fluid.parallel_for(X, L, + fluid.parallel_for(X, L, func(index int) { //// block 1 x = X[index] fluid.send(L[index], x) @@ -116,7 +146,7 @@ An explanation of the above program: - `fluid.k8s` is a package that provides access to Kubernetes API. - `fluid.k8s.get_worker_addrs` returns the list of IP and ports of all pods of the current job except for the current one (the master pod). -- `fluid.tensor_array` creates a [tensor array](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor_array.h). `fluid.parallel_for` creates a `ParallelFor` intrinsic, which, when executed, +- `fluid.tensor_array` creates a [tensor array](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor_array.h). `fluid.parallel_for` creates a `ParallelFor` intrinsic, which, when executed, 1. creates `len(L)` scopes, each for the concurrent running of the sub-block (block 1 in this case), and initializes a variable named "index" in the scope to an integer value in the range `[0, len(L)-1]`, and 2. creates `len(L)` threads by calling into the `ThreadPool` singleton, each thread diff --git a/doc/fluid/design/concurrent/csp.md b/doc/fluid/design/concurrent/csp.md index 10d936860fab7e09241e968a63526c7d86d3e568..66d19f44baf861c7847e81ca83f61024ec877faf 100644 --- a/doc/fluid/design/concurrent/csp.md +++ b/doc/fluid/design/concurrent/csp.md @@ -13,14 +13,41 @@ Most DL systems, including TensorFlow, Caffe2, and MxNet, can asynchronously exe There were many concurrent programming models, implemented in various forms: -| concurrent programming model | implementation | -|-----|-----| -| mutex | types and functions in standard libraries | -| semaphore | types and functions in standard libraries | -| communicating sequential processes (CSP) | Go programming language | -| actor model | Erlang programming language | -| message passing | MPI | -| bulk synchronous parallel (BSP) | Pregel distributed programming framework | +<table> +<thead> +<tr>
+<th>concurrent programming model</th> +<th>implementation</th> +</tr> +</thead> +<tbody>
+<tr> +<td>mutex</td> +<td>types and functions in standard libraries</td> +</tr>
+<tr> +<td>semaphore</td> +<td>types and functions in standard libraries</td> +</tr>
+<tr> +<td>communicating sequential processes (CSP)</td> +<td>Go programming language</td> +</tr>
+<tr> +<td>actor model</td> +<td>Erlang programming language</td> +</tr>
+<tr> +<td>message passing</td> +<td>MPI</td> +</tr>
+<tr> +<td>bulk synchronous parallel (BSP)</td> +<td>Pregel distributed programming framework</td> +</tr>
+</tbody> +</table> + +Since Fluid was designed to be a programming language, we would like to implement CSP in Fluid. @@ -118,9 +145,9 @@ There are four types of actions with a channel: ```go close(ch) ``` - + Please be aware that a closed channel is not a nil channel, which is `var ch chan int`. - + There are some [axioms with channels](https://dave.cheney.net/2014/03/19/channel-axioms): 1. A send to a nil channel blocks forever diff --git a/doc/fluid/design/concurrent/images/channel_recv.png b/doc/fluid/design/concurrent/images/channel_recv.png new file mode 100644 index 0000000000000000000000000000000000000000..c06cd15ae7b8a8c94d5742f6675e389081fcf789 Binary files /dev/null and b/doc/fluid/design/concurrent/images/channel_recv.png differ diff --git a/doc/fluid/design/concurrent/images/channel_send.png b/doc/fluid/design/concurrent/images/channel_send.png new file mode 100644 index 0000000000000000000000000000000000000000..006ebb4a5a4bcd32c97847e9fb7729a740255f7c Binary files /dev/null and b/doc/fluid/design/concurrent/images/channel_send.png differ diff --git a/doc/fluid/design/concurrent/index_cn.rst b/doc/fluid/design/concurrent/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e47135e9fc42760898083710e0a6767252a0225b --- /dev/null +++ b/doc/fluid/design/concurrent/index_cn.rst @@ -0,0 +1,8 @@ +并发编程 +------------ + +.. toctree:: + :maxdepth: 1 + + concurrent_programming.md + parallel_do.md diff --git a/doc/fluid/design/concurrent/index_en.rst b/doc/fluid/design/concurrent/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..0727e75798b2a869588f80d3cce7a886554e4ffb --- /dev/null +++ b/doc/fluid/design/concurrent/index_en.rst @@ -0,0 +1,8 @@ +Concurrent Programming +------------------------- + +.. toctree:: + :maxdepth: 1 + + concurrent_programming.md + parallel_do.md diff --git a/doc/fluid/design/concurrent/select_op.md b/doc/fluid/design/concurrent/select_op.md index 52c226bc94a4e8bfc5588705d7f65328840e91cc..4fcae57cc7932cdaebe549486e7f7cebf0bd038a 100644 --- a/doc/fluid/design/concurrent/select_op.md +++ b/doc/fluid/design/concurrent/select_op.md @@ -2,13 +2,13 @@ ## Introduction -In golang, the [**select**](https://golang.org/ref/spec#Select_statements) -statement lets a goroutine wait on multiple communication operations at the -same time. The **select** blocks until one of its cases can run, then -executes the case. If multiple cases are ready to run, then one case is +In golang, the [**select**](https://golang.org/ref/spec#Select_statements) +statement lets a goroutine wait on multiple communication operations at the +same time. The **select** blocks until one of its cases can run, then +executes the case. If multiple cases are ready to run, then one case is chosen at random to be executed. -With the introduction of CSP for Paddle, we mimic this behavior by +creating a ***select_op***. ## How to use it The **select_op** is available as a c++ operator. However, most users will prefer to use the much simpler Python API. - **fluid.Select()**: Creates a select operator and adds it to the current -block within the main program. Also creates a sub block and adds it to the -main program. This sub block is used to hold all variables and operators +block within the main program. Also creates a sub block and adds it to the +main program. This sub block is used to hold all variables and operators used by the case statements.
- -Within the select block, users can add cases by + +Within the select block, users can add cases by calling **select.case** or **select.default** method. - **fluid.Select.case(channel_action, channel, result_variable)**: Represents @@ -37,13 +37,13 @@ execute. ``` ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) - + x = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) y = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) - + while_cond = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) while_op = While(cond=while_cond) - + with while_op.block(): with fluid.Select() as select: with select.case(fluid.channel_send, channel, x): @@ -99,17 +99,17 @@ blocks { } } // Create "select" operator. - // inputs: + // inputs: // X: All input variables used by operators within the select block // case_to_execute: Variable filled in by select_op when it determines // which case to execute. // // outputs: - // Out: All output variables referenced by operators within select block. - // + // Out: All output variables referenced by operators within select block. + // // attrs: // sub_block: The block id containing the select "cases" - // cases: Serialized list of all cases in the select op. + // cases: Serialized list of all cases in the select op. // Each case is serialized as: ',,,' // where type is 0 for default, 1 for send, and 2 for receive. // No channel and values are needed for default cases. @@ -150,7 +150,7 @@ into **X**. It will also create a temp variable called **case_to_execute**. Th filled in by the select_op after it has completed processing the case statements. If there are no available cases to execute (ie: all cases are blocked on channel operations, and -there is no default statement), then the select_op will block the current thread. The thread will +there is no default statement), then the select_op will block the current thread. The thread will unblock once there is a channel operation affecting one of the case statements, at which point, the **select_op** will set the **case_to_execute** variable to the index of the case to execute. @@ -247,17 +247,17 @@ blocks { ``` -Cases are represented by a **conditional_block operator**, whose's condition is set as the output of -equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, +Cases are represented by a **conditional_block operator**, whose's condition is set as the output of +equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, only one case will be executed. ### select_op flow
<p align="center">(figure: select_op flow)</p>
-The select algorithm is inspired by golang's select routine. Please refer to +The select algorithm is inspired by golang's select routine. Please refer to http://www.tapirgames.com/blog/golang-concurrent-select-implementation for more information. ## Backward Pass diff --git a/doc/fluid/design/data_type/index_cn.rst b/doc/fluid/design/data_type/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b60167b6b1599df69dfc5073ebf32bdbb0a316ec --- /dev/null +++ b/doc/fluid/design/data_type/index_cn.rst @@ -0,0 +1,7 @@ +数据类型 +------------ + +.. toctree:: + :maxdepth: 1 + + float16.md diff --git a/doc/fluid/design/data_type/index_en.rst b/doc/fluid/design/data_type/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a88d17943f49134a2d00363845e919537ff4545 --- /dev/null +++ b/doc/fluid/design/data_type/index_en.rst @@ -0,0 +1,7 @@ +Data Type +------------ + +.. toctree:: + :maxdepth: 1 + + float16.md diff --git a/doc/fluid/design/dist_train/README.md b/doc/fluid/design/dist_train/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2dd652d8bdcb8f3b6e759347bd55b217be909386 --- /dev/null +++ b/doc/fluid/design/dist_train/README.md @@ -0,0 +1,57 @@ +## Distributed training overview doc + +Currently Paddle Fluid uses a parameter server architecture to support distributed training. + +For synchronous and asynchronous training, the differences are mostly in the logic of the parameter server. We already support synchronous training. + +### Synchronous training + +The training process of synchronous training is: + +![synchronous distributed training](./src/sync_distributed_training.png) + +1. Pserver + 1. Set `barrier_condition_` to 0 and wait for trainers to send gradients. +1. Trainer + 1. The trainer reads a minibatch of data, runs forward-backward with the local parameter copy, and gets the gradients for the parameters. + 1. The trainer uses the split op to split all the gradients into blocks. The split method is determined at compile time. + 1. The trainer uses send_op to send all the split gradients to the corresponding parameter servers. + 1. After the trainer has sent all the gradients, it sends a `BATCH_BARRIER_MESSAGE` to all pservers. + 1. The trainer calls GetVariable on the pserver and waits for `barrier_condition_` on the pserver to be 1. +1. Pserver + 1. The pserver counts the number of `BATCH_BARRIER_MESSAGE`s. + 1. When the count of `BATCH_BARRIER_MESSAGE`s equals the number of trainers, the pserver considers that it has received all gradients from all trainers. + 1. The pserver runs the optimization block to optimize the parameters. + 1. After optimization, the pserver sets `barrier_condition_` to 1. + 1. The pserver waits for `FETCH_BARRIER_MESSAGE`. +1. Trainer + 1. The trainer uses GetVariable to get all the parameters from the pserver. + 1. The trainer sends a `FETCH_BARRIER_MESSAGE` to each pserver. +1. Pserver + 1. When the number of `FETCH_BARRIER_MESSAGE`s reaches the number of trainers, the pserver assumes all the parameters have been fetched and goes back to step 1 to set `barrier_condition_` to 0. + +### Asynchronous training +In the above process, there are two barriers for all trainers to synchronize with each other. In asynchronous training, these two barriers are not needed. The trainer can just send gradients to the pserver and then get parameters back. + +The training process of asynchronous training can be: + +![asynchronous distributed training](./src/async_distributed_training.png) + +1. Pserver: + 1. Each parameter has a queue to receive its gradient from trainers. + 1.
Each parameter has a thread to read data from the queue and run the optimize block, using the gradient to optimize the parameter. + 1. An independent thread handles the RPC call `GetVariable` for trainers to get parameters back. (Maybe we should use a thread pool here to speed up fetching the parameters.) + +1. Trainer: + 1. The trainer reads a batch of data, runs forward and backward with the local parameter copy, and gets the gradients for the parameters. + 1. The trainer splits all gradients into blocks and then sends these gradient blocks to the pservers (the pserver will put them into the queue). + 2. The trainer gets all parameters back from the pservers. + +### Note: +There are also some conditions that need to be considered. For example: + +1. Whether the trainer needs to wait for the pserver to apply its gradient before getting the parameters back. +1. Whether we need a lock between parameter update and parameter fetch. +1. Whether one parameter must live on one server, or whether it can be split and sent to multiple parameter servers. + +The above architecture of asynchronous training can support different modes; we can run detailed tests on these problems in the future. diff --git a/doc/fluid/design/dist_train/async_update.md b/doc/fluid/design/dist_train/async_update.md new file mode 100644 index 0000000000000000000000000000000000000000..6a0835b761b69030ba30697e6e8863928efbf57f --- /dev/null +++ b/doc/fluid/design/dist_train/async_update.md @@ -0,0 +1,58 @@ +# Design Doc: Asynchronous Update With Distributed Training + +## Background + +For typical synchronous distributed training, the significant steps are as follows: + +1. A Trainer computes the gradients and SENDs them to the Parameter Server (PServer) nodes. +1. After the PServer node has received the gradients from all the Trainers, it aggregates the gradient variables for the same parameter into one gradient variable and then applies the aggregated gradient to the respective parameter, finally using an optimization algorithm (SGD, Momentum...) to update the parameters. +1. The Trainer waits for the PServers to finish the optimization stage and GETs the parameters from the PServers, so all the Trainers get the same parameters. + +In synchronous distributed training, there has to be a `Barrier` to synchronize the parameters after the optimization stage. The performance of a distributed training job depends on its slowest node; if there were hundreds or thousands of training nodes in a job, the performance of synchronous distributed training might be very poor because of slow nodes. So this design doc introduces an approach to implement *asynchronous* distributed training in PaddlePaddle Fluid. + +## Design + +<img src="./src/async_update.png"/> + +As the figure above shows, we describe a global view of the asynchronous update process and use the parameter `w1` as an example to introduce the steps: +1. The gradient variables may be distributed on different GPU cards; aggregate them once they are all calculated. +1. Split the gradient variable into multiple blocks according to the number of PServer instances and then send them. +1. The PServer runs an `Optimize Block` using a specified optimization algorithm to update the specified parameter. +1. The trainer fetches the latest parameter from the PServer before running a forward Op which depends on the specified parameter. +1. Broadcast the received variable to multiple GPU cards and continue to run the next mini-batch. A minimal sketch of the pserver-side update loop follows this list.
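Below is a minimal Python sketch of the pserver-side loop described in the steps above (names such as `optimize_block` are hypothetical; the real implementation is C++): one blocking queue per gradient variable, with a worker thread draining the queue and applying the optimization one gradient at a time.

```python
import queue
import threading

params = {"w1": 1.0}
grad_queues = {"w1@GRAD": queue.Queue()}  # one queue per gradient variable
LR = 0.01                                 # assumed learning rate

def optimize_block(param_name, grad):
    # Stand-in for the mapped Optimize Block: plain SGD here.
    params[param_name] -= LR * grad

def pserver_worker(grad_name, param_name):
    while True:
        grad = grad_queues[grad_name].get()  # blocks until a trainer sends
        if grad is None:                     # shutdown signal
            break
        optimize_block(param_name, grad)     # gradients applied one by one

t = threading.Thread(target=pserver_worker, args=("w1@GRAD", "w1"))
t.start()
for g in (0.5, -0.25, 0.1):                  # gradient blocks from trainers
    grad_queues["w1@GRAD"].put(g)
grad_queues["w1@GRAD"].put(None)
t.join()
print(params["w1"])  # 0.9965 after the three updates
```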
+ +### Trainer + +- For multi-device distributed training, we need to first aggregate the gradient variables placed on the different devices and then schedule a `SendVars` Operator to send the gradient variables to the multiple PServer instances. +- Schedule a `FetchVars` operator to fetch the latest parameters from the PServer before running the forward ops. +- There could be a large number of gradient variables to be sent, so we need to use another thread pool (IO Threadpool), whose number of schedulable threads is larger than that of the computing thread pool, to avoid competing with computation for thread resources. + +### Parameter Server + +<img src="./src/async_pserver.png"/> + +- Multiple trainer instances may want to optimize the same parameter at the same time; to avoid racing, we need one `BlockingQueue` for each gradient variable so that the gradients are processed one by one. +- We need a `Map` structure to map a gradient variable name to the `OptimizeBlock` which can optimize the respective parameter. diff --git a/doc/fluid/design/dist_train/distributed_architecture.md b/doc/fluid/design/dist_train/distributed_architecture.md index a405cb6aaf80b9d2e8a1a9c774ca85cc7e62bbab..229cb47c17d633be6848bb35e58d33ec9b47ec3b 100644 --- a/doc/fluid/design/dist_train/distributed_architecture.md +++ b/doc/fluid/design/dist_train/distributed_architecture.md @@ -40,11 +40,11 @@ computation is only specified in Python code which sits outside of PaddlePaddle, Similar to how a compiler uses an intermediate representation (IR) so that the programmer does not need to manually optimize their code for most of the cases, we can have an intermediate representation in PaddlePaddle as well. The compiler optimizes the IR as follows: - + PaddlePaddle can support model parallelism by converting the IR so that the user no longer needs to manually perform the computation and operations in the Python component: - + The IR for PaddlePaddle after refactoring is called a `Block`, it specifies the computation dependency graph and the variables used in the computation. @@ -60,7 +60,7 @@ For a detailed explanation, refer to this document - The revamped distributed training architecture can address the above discussed limitations. Below is the illustration of how it does so: - + The major components are: *Python API*, *Distribute Transpiler* and *Remote Executor*. @@ -152,7 +152,7 @@ for data in train_reader(): `JobDesc` object describe the distributed job resource specification to run on Cluster environment.
- + `RemoteExecutor.run` sends the `ProgramDesc` and [TrainingJob](https://github.com/PaddlePaddle/cloud/blob/unreleased-tpr/doc/autoscale/README.md#training-job-resource) @@ -171,7 +171,7 @@ In the future, a more general placement algorithm should be implemented, which m The local training architecture will be the same as the distributed training architecture, the difference is that everything runs locally, and there is just one PaddlePaddle runtime: - + ### Training Data diff --git a/doc/fluid/design/dist_train/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md index e543adf0f97cc6b47415b807d7a1ed1effec9b22..988729138926f035750b59eb245dde82502a3ad2 100644 --- a/doc/fluid/design/dist_train/distributed_lookup_table_design.md +++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md @@ -1,4 +1,4 @@ -## Design Doc: Distributed Lookup Table Operator +# Design Doc: Distributed Lookup Table Operator A lookup table operator in PaddlePaddle where the table could be out of the memory of a computer. diff --git a/doc/fluid/design/dist_train/index_cn.rst b/doc/fluid/design/dist_train/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed6f3dda271d2de58d92aa7ec804fa9e68dfc48a --- /dev/null +++ b/doc/fluid/design/dist_train/index_cn.rst @@ -0,0 +1,9 @@ +分布式训练 +------------ + +.. toctree:: + :maxdepth: 1 + + distributed_architecture.md + distributed_lookup_table_design.md + parameter_server.md diff --git a/doc/fluid/design/dist_train/index_en.rst b/doc/fluid/design/dist_train/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f84688f168021113bd933802709bcd787b474bca --- /dev/null +++ b/doc/fluid/design/dist_train/index_en.rst @@ -0,0 +1,9 @@ +Distributed Training +--------------------- + +.. toctree:: + :maxdepth: 1 + + distributed_architecture.md + distributed_lookup_table_design.md + parameter_server.md diff --git a/doc/fluid/design/dist_train/multi_cpu.md b/doc/fluid/design/dist_train/multi_cpu.md index a8d8ee0422acc84835170a44eb83f9b5f0c6bb40..38222d083084ebfca3099ce96b47868c42d55101 100644 --- a/doc/fluid/design/dist_train/multi_cpu.md +++ b/doc/fluid/design/dist_train/multi_cpu.md @@ -8,11 +8,11 @@ Op graph to a multi-CPU Op graph, and run `ParallelDo` Op to run the graph. ## Transpiler - + After converted: - + ## Implement diff --git a/doc/fluid/design/dist_train/parameter_server.md b/doc/fluid/design/dist_train/parameter_server.md index 6ce48dfbfce8b094684b412ebfda7e505ddc30ae..73c85da5e89eee0ac7857a0b808bc64ae673fdad 100644 --- a/doc/fluid/design/dist_train/parameter_server.md +++ b/doc/fluid/design/dist_train/parameter_server.md @@ -41,11 +41,11 @@ We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*. Below is an example of converting the user defined graph to the subgraphs for the trainer and the parameter server: - + After converting: - + 1. The parameter variable W and its optimizer program are placed on the parameter server. 1. Operators are added to the program. @@ -69,8 +69,7 @@ In Fluid, we introduce [SelectedRows](../selected_rows.md) to represent a list o non-zero gradient data. 
So when we do parameter optimization both locally and remotely, we only need to send those non-zero rows to the optimizer operators: - - + ### Benefits - Model parallelism becomes easier to implement: it is an extension to diff --git a/doc/fluid/design/dist_train/src/async_distributed_training.png b/doc/fluid/design/dist_train/src/async_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..3b53ab59c0cd7b44b2956f16f1adc47fe85909d3 Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_distributed_training.png differ diff --git a/doc/fluid/design/dist_train/src/async_pserver.graffle b/doc/fluid/design/dist_train/src/async_pserver.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d2301611774fcb3866473e3e6470568d1e1312cf Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_pserver.graffle differ diff --git a/doc/fluid/design/dist_train/src/async_pserver.png b/doc/fluid/design/dist_train/src/async_pserver.png new file mode 100644 index 0000000000000000000000000000000000000000..7d900b0c0eb291c67537b9cf93227c671bafdc73 Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_pserver.png differ diff --git a/doc/fluid/design/dist_train/src/async_update.graffle b/doc/fluid/design/dist_train/src/async_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..3a631888688a0d564a873fcb16d943958c91223e Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_update.graffle differ diff --git a/doc/fluid/design/dist_train/src/async_update.png b/doc/fluid/design/dist_train/src/async_update.png new file mode 100644 index 0000000000000000000000000000000000000000..3e8db973f45d6d9ac8dcce1dc7878067e79e6dcc Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_update.png differ diff --git a/doc/fluid/design/dist_train/src/distributed_training.graffle b/doc/fluid/design/dist_train/src/distributed_training.graffle new file mode 100644 index 0000000000000000000000000000000000000000..1168801bc1fadfce310a74cb3110695bd1629f6b Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_training.graffle differ diff --git a/doc/fluid/design/dist_train/src/sync_distributed_training.png b/doc/fluid/design/dist_train/src/sync_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..e4f9a221fea4b7238e8a1d84e609c0371f6ef7a2 Binary files /dev/null and b/doc/fluid/design/dist_train/src/sync_distributed_training.png differ diff --git a/doc/fluid/design/dynamic_rnn/index_cn.rst b/doc/fluid/design/dynamic_rnn/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d224d22cf7103616f44115db01f0ae55f1cb88a --- /dev/null +++ b/doc/fluid/design/dynamic_rnn/index_cn.rst @@ -0,0 +1,8 @@ +动态RNN +------------ + +.. toctree:: + :maxdepth: 1 + + rnn.md + rnn_design.md diff --git a/doc/fluid/design/dynamic_rnn/index_en.rst b/doc/fluid/design/dynamic_rnn/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..568f496e4ffe21a5e730488aef905f7e2d98839e --- /dev/null +++ b/doc/fluid/design/dynamic_rnn/index_en.rst @@ -0,0 +1,8 @@ +Dynamic RNN +------------ + +.. 
toctree:: + :maxdepth: 1 + + rnn.md + rnn_design.md diff --git a/doc/fluid/design/dynamic_rnn/rnn.md b/doc/fluid/design/dynamic_rnn/rnn.md index 6f414e5549b149bc88fb252085ff56dbb06730f8..7b61b050f640814d6949cf6847b431da53d59581 100644 --- a/doc/fluid/design/dynamic_rnn/rnn.md +++ b/doc/fluid/design/dynamic_rnn/rnn.md @@ -5,7 +5,7 @@ This document describes the RNN (Recurrent Neural Network) operator and how it i ## RNN Algorithm Implementation
<p align="center">(figure: RNN unrolled into a full network)</p>
The above diagram shows an RNN unrolled into a full network. @@ -22,7 +22,7 @@ There are several important concepts here: There could be local variables defined in each step-net. PaddlePaddle runtime realizes these variables in *step-scopes* which are created for each step.
<p align="center">(figure 2: RNN data flow)</p>
Figure 2 illustrates the RNN's data flow

@@ -93,7 +93,7 @@ For example, we could have a 2-level RNN, where the top level corresponds to par The following figure illustrates feeding text into the lower level, one sentence at a step, and feeding the step outputs to the top level. The final top-level output is about the whole text.
<p align="center">(figure: 2-level RNN over sentences, with a paragraph-level output on top)</p>
```python @@ -149,5 +149,5 @@ If the `output_all_steps` is set to False, it will only output the final time st
<p align="center">(figure: RNN emitting only the final time step output)</p>
diff --git a/doc/fluid/design/dynamic_rnn/rnn_design.md b/doc/fluid/design/dynamic_rnn/rnn_design.md index 3d38b9a0ad225fd8e0c1bb037474b292b1887f5b..cecfcd3307ae4c4fa603220a360e9e124069fa58 100644 --- a/doc/fluid/design/dynamic_rnn/rnn_design.md +++ b/doc/fluid/design/dynamic_rnn/rnn_design.md @@ -99,7 +99,7 @@ private: - 由于传递过程是以复制`shared_ptr`的方式实现,因此框架只需要传递一次 `lod_start_pos` 2. 对于不感知 `lod_start_pos` 的Op足够透明 -3. 需要修改 `lod_start_pos` 的producer Op可以在 `Run` 时更新自己的 `lod_start_pos` 数据 +3. 需要修改 `lod_start_pos` 的producer Op可以在 `Run` 时更新自己的 `lod_start_pos` 数据 具体的设计分为以下3小节 @@ -189,7 +189,7 @@ struct SortedSeqItem { std::vector<SortedSeqItem> sorted_seqs; ``` -来追踪序列排序后的位置,并添加一个新的接口 +来追踪序列排序后的位置,并添加一个新的接口 ```c++ std::vector<SortedSeqItem> SortBySeqLen(const LODTensor& tensor); @@ -233,7 +233,10 @@ x x - 将每个序列concat 为规则的mini-batch表示 ## 参考文献 -1. [Tensorflow Bucketing](https://www.tensorflow.org/versions/r0.12/api_docs/python/contrib.training/bucketing) -2. [mxnet Bucketing](http://mxnet.io/how_to/bucketing.html) -3. [variable length input in RNN scenario](https://discuss.pytorch.org/t/about-the-variable-length-input-in-rnn-scenario/345/5) -4. [Level of details](https://en.wikipedia.org/wiki/Level_of_detail) +[Tensorflow Bucketing](https://www.tensorflow.org/versions/r0.12/api_docs/python/contrib.training/bucketing) + +[mxnet Bucketing](http://mxnet.io/how_to/bucketing.html) + +[variable length input in RNN scenario](https://discuss.pytorch.org/t/about-the-variable-length-input-in-rnn-scenario/345/5) + +[Level of details](https://en.wikipedia.org/wiki/Level_of_detail) diff --git a/doc/fluid/design/execution/index_cn.rst b/doc/fluid/design/execution/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed31b017429d168b2466d8f6b423f48bd5d78d1f --- /dev/null +++ b/doc/fluid/design/execution/index_cn.rst @@ -0,0 +1,8 @@ +执行流程 +------------- + +.. toctree:: + :maxdepth: 1 + + switch.md + if_else_op.md diff --git a/doc/fluid/design/execution/index_en.rst b/doc/fluid/design/execution/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..fcf846da348ff0bed707c42718e08314998fbac0 --- /dev/null +++ b/doc/fluid/design/execution/index_en.rst @@ -0,0 +1,8 @@ +Execution Process +-------------------------------------- + +.. toctree:: + :maxdepth: 1 + + switch.md + if_else_op.md diff --git a/doc/fluid/design/execution/switch.md b/doc/fluid/design/execution/switch.md index 827d0601c621e4a230de28e2baad8e196e69625e..1c337bd7159b25e594c2f91f9a143b3f4bc3c8e8 100644 --- a/doc/fluid/design/execution/switch.md +++ b/doc/fluid/design/execution/switch.md @@ -1,6 +1,6 @@ -### Design Doc: Switch +# Design Doc: Switch -### Background +## Background Many programming languages provide `switch` as a generalization of `if-elif-else`. We want to add it to Fluid. @@ -19,7 +19,7 @@ with switch() as switch: fluid.print("Case 3") ``` -### The Semantics +## The Semantics 1. A `switch` control-flow checks cases one-by-one. 1. The condition of each case is a boolean value, which is a scalar, and differs from the `fluid.if_else` control-flow, whose condition could be a vector of boolean values. diff --git a/doc/fluid/design/index_cn.rst b/doc/fluid/design/index_cn.rst index f1887be6901653d4263d711d78b626d2abfd45c9..e9f55214f411abb11bef180d7af4716ad85a0b09 100644 --- a/doc/fluid/design/index_cn.rst +++ b/doc/fluid/design/index_cn.rst @@ -1,2 +1,19 @@ 设计思想 ------------ + +..
toctree:: + :maxdepth: 1 + + motivation/index_cn.rst + execution/index_cn.rst + concepts/index_cn.rst + data_type/index_cn.rst + memory/index_cn.rst + muti_devices/index_cn.rst + dynamic_rnn/index_cn.rst + concurrent/index_cn.rst + algorithm/index_cn.rst + network/index_cn.rst + modules/index_cn.rst + interface/index_cn.rst + dist_train/index_cn.rst diff --git a/doc/fluid/design/index_en.rst b/doc/fluid/design/index_en.rst index 18a4b4122f6e3f0096676f34ffea8a80aa9b6696..2802dc3a31d540c5a19bf9042053496aad152f98 100644 --- a/doc/fluid/design/index_en.rst +++ b/doc/fluid/design/index_en.rst @@ -1,2 +1,19 @@ Design ------------ + +.. toctree:: + :maxdepth: 1 + + motivation/index_en.rst + execution/index_en.rst + concepts/index_en.rst + data_type/index_en.rst + memory/index_en.rst + muti_devices/index_en.rst + dynamic_rnn/index_en.rst + concurrent/index_en.rst + algorithm/index_en.rst + network/index_en.rst + modules/index_en.rst + interface/index_en.rst + dist_train/index_en.rst diff --git a/doc/fluid/design/interface/index_cn.rst b/doc/fluid/design/interface/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..69a8d9bad4fe88935b9fa87757abf0105ca8eb75 --- /dev/null +++ b/doc/fluid/design/interface/index_cn.rst @@ -0,0 +1,4 @@ +多语言接口 +------------ + +TBD diff --git a/doc/fluid/design/interface/index_en.rst b/doc/fluid/design/interface/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..22abc71f984aa5da7151d5ebf0c3bdbcc69a3624 --- /dev/null +++ b/doc/fluid/design/interface/index_en.rst @@ -0,0 +1,4 @@ +Multi-Language Interface +------------------------ + +TBD diff --git a/doc/fluid/design/memory/index_cn.rst b/doc/fluid/design/memory/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c507c638bd1a6eb428175ed2756a6ecfc6cca198 --- /dev/null +++ b/doc/fluid/design/memory/index_cn.rst @@ -0,0 +1,7 @@ +内存管理 +------------ + +.. toctree:: + :maxdepth: 1 + + memory_optimization.md diff --git a/doc/fluid/design/memory/index_en.rst b/doc/fluid/design/memory/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7526437a73a09b300f05e138084755f5528b242 --- /dev/null +++ b/doc/fluid/design/memory/index_en.rst @@ -0,0 +1,7 @@ +Memory Management +------------------- + +.. toctree:: + :maxdepth: 1 + + memory_optimization.md diff --git a/doc/fluid/design/modules/batch_norm_op.md b/doc/fluid/design/modules/batch_norm_op.md index d1392619c42d9206bf4bddcd33ad11b033e6cbdb..e451ffcc73b5de2b911e1c6de54b42a5d1d54c37 100644 --- a/doc/fluid/design/modules/batch_norm_op.md +++ b/doc/fluid/design/modules/batch_norm_op.md @@ -2,7 +2,7 @@ ## What is batch normalization -Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output, and make the data distribution easier for next layer's training. +Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output and makes the data distribution easier for the next layer's training. The principle of batch normalization can be summarized into a simple function: @@ -66,7 +66,7 @@ As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attribu The following graph shows the training computational process of `batch_norm_op`: - + cudnn provides APIs to finish the whole series of computation; we can use them in our GPU kernel.
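For reference, here is a minimal NumPy sketch of the forward computation that `batch_norm_op` performs (a simplified illustration of the math only; the real op dispatches to Eigen/cuDNN kernels and also computes gradients). The `epsilon` and `momentum` defaults follow the `batch_norm_layer` example below.

```python
import numpy as np

def batch_norm_forward(x, scale, bias, mean_cache, var_cache,
                       is_infer=False, epsilon=1e-6, momentum=0.99):
    """x: (batch, channels); scale, bias, caches: (channels,)."""
    if is_infer:
        # Inference uses the estimated (moving) statistics.
        mean, var = mean_cache, var_cache
    else:
        # Training uses mini-batch statistics and updates the caches.
        mean, var = x.mean(axis=0), x.var(axis=0)
        mean_cache[:] = momentum * mean_cache + (1 - momentum) * mean
        var_cache[:] = momentum * var_cache + (1 - momentum) * var
    x_hat = (x - mean) / np.sqrt(var + epsilon)
    return scale * x_hat + bias

x = np.random.randn(8, 4).astype(np.float32)
scale, bias = np.ones(4), np.zeros(4)
mean_cache, var_cache = np.zeros(4), np.ones(4)
y = batch_norm_forward(x, scale, bias, mean_cache, var_cache)
print(y.mean(axis=0), y.var(axis=0))  # ~0 and ~1 per channel in training mode
```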
@@ -74,13 +74,13 @@ cudnn provides APIs to finish the whole series of computation, we can use them i `batch_norm_op` is wrapped as a layer in Python: -```python -def batch_norm_layer(net, +```python +def batch_norm_layer(net, input, - output, - scale, - bias, - use_global_est = False, + output, + scale, + bias, + use_global_est = False, epsilon = 1e-6, momentum = 0.99): mean_cache = scope.new_var(name = 'estimated_mean', trainable = False) @@ -119,15 +119,15 @@ for pass_id in range(PASS_NUM): if pass_id % 100 == 0: net.infer(test_image) # run inferencing model # ... -``` +``` `is_infer` is an attribute. Once an operator is created, its attributes cannot be changed. This suggests that we maintain two `batch_norm_op`s in the model, one whose `is_infer` is `True` (we call it `infer_batch_norm_op`) and one whose `is_infer` is `False` (we call it `train_batch_norm_op`). They share all parameters and variables, but are placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it will fork into two branches, one going through `train_batch_norm_op` and the other going through `infer_batch_norm_op`:
<p align="center">(figure: the network forks into train and infer branches at batch_norm_op)</p>
-Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. +Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will be duplicated. When the net runs in training mode, the end of the left branch will be set as the running target, so the dependency tracking process will ignore the right branch automatically. When the net runs in inference mode, the process is reversed. diff --git a/doc/fluid/design/modules/evaluator.md b/doc/fluid/design/modules/evaluator.md index 11cc129d56905a9ee666da92fbe6f8559c6d325a..de9605b0e67a035ab1ef1e4cafbe838f83bc5807 100644 --- a/doc/fluid/design/modules/evaluator.md +++ b/doc/fluid/design/modules/evaluator.md @@ -1,10 +1,10 @@ -## Evaluator Design +# Evaluator Design -### Problem Statement +## Problem Statement During training or inference, we provide an evaluation function to measure the model performance, for example, accuracy, precision, etc. In the operator based framework design, the data passes through the network pipeline batch by batch. As a result, inside the operator, we only calculate the metrics for one minibatch. Thus, we need to provide a mechanism to calculate the metrics for each N pass/batch the user wants. -### Evaluator Design +## Evaluator Design Currently, every operation is expressed in the graph. We divide the evaluator process into three steps. 1. Initialize the metric state and add it into the block. @@ -14,11 +14,11 @@ Currently, every operation is expressed in the graph. We divide the evaluator pr 3. Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. When it comes to distributed training/Multi-GPU training, aggregate the value from different devices. -### Implementation -This design is shown in the Python API. -Each metric operator needs to caculate the metric statistic and return the batch-aware states. Python side is responsible for accumulating the states for each pass. +## Implementation +This design is shown in the Python API. +Each metric operator needs to calculate the metric statistic and return the batch-aware states. The Python side is responsible for accumulating the states for each pass. + - ```python class Evaluator(object): """ @@ -32,7 +32,7 @@ class Evaluator(object): The initialization of Evaluator should be responsible for: create metric states and append to the main_program - """ + """ pass def _update_ops(self, input, label, **kwargs) """ Add mini-batch evaluator calculate operators to the main_program. Add increment operator to accumulate the metric states. """ - + def reset(self, executor, reset_program=None): """ Reset metric states at the begin of each pass/user specified batch number. Execute the reset_program to reset the states. """ - + def eval(self, executor, eval_program=None): """ diff --git a/doc/fluid/design/modules/index_cn.rst b/doc/fluid/design/modules/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..b25783f0f5120991c29ba31b7b512bd4c183eecf --- /dev/null +++ b/doc/fluid/design/modules/index_cn.rst @@ -0,0 +1,14 @@ +代码结构和重要模块 +----------------- + +..
toctree:: + :maxdepth: 1 + + backward.md + python_api.md + regularization.md + infer_var_type.md + optimizer.md + prune.md + register_grad_op.md + net_op_design.md diff --git a/doc/fluid/design/modules/index_en.rst b/doc/fluid/design/modules/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..2108156e080996916f2650448f0a56f998757204 --- /dev/null +++ b/doc/fluid/design/modules/index_en.rst @@ -0,0 +1,14 @@ +Code Structure and Important Modules +------------------------------------- + +.. toctree:: + :maxdepth: 1 + + backward.md + python_api.md + regularization.md + infer_var_type.md + optimizer.md + prune.md + register_grad_op.md + net_op_design.md diff --git a/doc/fluid/design/modules/net_op_design.md b/doc/fluid/design/modules/net_op_design.md index a5f0483081e8a03b2d001a551fcc02bbd392016d..e64ac2fb1c6898bfeb883250347da3d9a4757b97 100644 --- a/doc/fluid/design/modules/net_op_design.md +++ b/doc/fluid/design/modules/net_op_design.md @@ -1,16 +1,16 @@ # Network Design `Network` is the container and controller of a set of operators, -user can build a real network from a `NetDesc` which is a protobuf message +user can build a real network from a `NetDesc` which is a protobuf message and use `Network.Run()` to run all the operators in the network. -A network object knows all Operators belonging to this network. Variables, -which are inputs and outputs of these operators, +A network object knows all Operators belonging to this network. Variables, +which are inputs and outputs of these operators, are created and managed by a hierarchy of Scope objects. -# API +## API -## Net +### Net To make the `Network` extendable, a base class is defined like this ```c++ class Net { }; ``` -All network implementations should build networks from a protobuf message which -describes the structure of a real network; `Run` method should be implemented by +All network implementations should build networks from a protobuf message which +describes the structure of a real network; the `Run` method should be implemented by all implementations to offer a universal method to forward or backward compute a network. `Net::Create` is a method of factory pattern and can be implemented like @@ -64,7 +64,7 @@ std::unique_ptr<Net> Net::Create(const NetDesc& def) { ``` Network is designed as the container of operators. To make it more extendable, -we decouple it from the related variable resources. +we decouple it from the related variable resources. `Run(Scope* scope)` takes the scope as an argument so that it can run in different scopes. @@ -80,7 +80,7 @@ if (net) { } ``` -## `PlainNet` as a simple implementation of `BaseNet` +### `PlainNet` as a simple implementation of `BaseNet` A very basic implementation is as follows. All it does is simply to run every operator in sequence.
@@ -211,9 +211,9 @@ class NetBuilder final { } ``` -## Compatibility with RNN +### Compatibility with RNN -Benefitting from the decoupling of `PlainNet.Run` and `Scope`, `PlainNet` is compatible with future RNN design, +Benefitting from the decoupling of `PlainNet.Run` and `Scope`, `PlainNet` is compatible with future RNN design, for example we can implement a simple recurrent neural network as follows ```c++ diff --git a/doc/fluid/design/modules/optimizer.md b/doc/fluid/design/modules/optimizer.md index 691081c268b848811bf5ee6d6a41edfe0f47eec0..1c25fde9cafb322f789662077d3fc6cc1d64ce38 100644 --- a/doc/fluid/design/modules/optimizer.md +++ b/doc/fluid/design/modules/optimizer.md @@ -1,6 +1,6 @@ -## Optimizer Design +# Optimizer Design -### The Problem +## The Problem A PaddlePaddle program, or a block, is a sequence of operators operating variables. A training program needs to do three kinds of works: @@ -19,7 +19,7 @@ It's true that users should be able to create all these operators manually by ca In this design, we propose a high-level API that automatically derives the optimisation pass and operators from the forward pass. -### High-level Python API to describe the training process +## High-level Python API to describe the training process 1. User write code to describe the network: @@ -54,7 +54,7 @@ In this design, we propose a high-level API that automatically derives the optim sess.run(target= opt_op_list, ...) ``` -#### Optimizer Python interface: +### Optimizer Python interface: ```python class Optimizer(object): diff --git a/doc/fluid/design/modules/python_api.md b/doc/fluid/design/modules/python_api.md index 73f6d7b90c7dca0d48109cf3d28d5f7cd56b5c0b..f83ad3b6a4e8b4d82d8fe8d4154a2739a9b9628b 100644 --- a/doc/fluid/design/modules/python_api.md +++ b/doc/fluid/design/modules/python_api.md @@ -2,12 +2,33 @@ Due to the refactorization of the PaddlePaddle core, we need Python classes to construct corresponding protobuf messages that describe a DL program. -| Python classes | Protobuf messages | -| --- | --- | -| Program | ProgramDesc | -| Block | BlockDesc | -| Operator | OpDesc | -| Variable | VarDesc | +<table> +<thead> +<tr>
+<th>Python classes</th> +<th>Protobuf messages</th> +</tr> +</thead> +<tbody>
+<tr> +<td>Program</td> +<td>ProgramDesc</td> +</tr>
+<tr> +<td>Block</td> +<td>BlockDesc</td> +</tr>
+<tr> +<td>Operator</td> +<td>OpDesc</td> +</tr>
+<tr> +<td>Variable</td> +<td>VarDesc</td> +</tr>
+</tbody> +</table> + +Please be aware that these Python classes need to maintain some construction-time information, which is not part of the protobuf messages. diff --git a/doc/fluid/design/modules/regularization.md b/doc/fluid/design/modules/regularization.md index 21280ac898feb4dd5e5a5d9e88d121e856850f0b..8cd5ff71d193f03e1ac923724b52f28c6057d25d 100644 --- a/doc/fluid/design/modules/regularization.md +++ b/doc/fluid/design/modules/regularization.md @@ -6,23 +6,23 @@ A central problem in machine learning is how to design an algorithm that will pe ### Parameter Norm Penalties Most common regularization approaches in deep learning are based on limiting the capacity of the models by adding a parameter norm penalty to the objective function `J`. This is given as follows: + +<p align="center">(formula: J̃(θ; X, y) = J(θ; X, y) + α Ω(θ))</p>
The parameter `alpha` is a hyperparameter that weights the relative contribution of the norm penalty term, `omega`, relative to the standard objective function `J`. The most commonly used norm penalties are the L2 norm penalty and the L1 norm penalty. These are given as follows: + +##### L2 Regularization: + +<p align="center">(formula: Ω(w) = ½ ‖w‖₂²)</p>
##### L1 Regularization + +<p align="center">(formula: Ω(w) = ‖w‖₁ = Σᵢ |wᵢ|)</p>
A much more detailed mathematical background of regularization can be found [here](http://www.deeplearningbook.org/contents/regularization.html). ## Regularization Survey -A detailed survey of regularization in various deep learning frameworks can be found [here](https://github.com/PaddlePaddle/Paddle/wiki/Regularization-Survey). +A detailed survey of regularization in various deep learning frameworks can be found [here](https://github.com/PaddlePaddle/Paddle/wiki/Regularization-Survey). ## Proposal for Regularization in PaddlePaddle @@ -32,41 +32,35 @@ In the new design, we propose to create new operations for regularization. For n - L2_regularization_op - L1_regularization_op -These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. +These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. -The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph, will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) in Python API. +The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) in Python API. ### Computation Graph Below is an example of a really simple feed forward neural network. + +<p align="center">(figure: a simple feed-forward network computation graph)</p>
The Python API will modify this computation graph to add regularization operators. The modified computation graph will look as follows: + +<p align="center">(figure: the computation graph with regularization operators added)</p>
### Python API implementation for Regularization -Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. +Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. #### Creation of Regularization ops There are two possibilities for creating the regularization ops: -1. We create these ops immediately while building the computation graph. -2. We add these ops in a lazy manner, just before the backward, similar to the way the optimization ops are added. +1. We create these ops immediately while building the computation graph. +2. We add these ops in a lazy manner, just before the backward pass, similar to the way the optimization ops are added. -The proposal is to add these ops in a lazy manner just before the backward pass. +The proposal is to add these ops in a lazy manner just before the backward pass. #### Storage of Regularization attributes -Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the [`Parameter`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/framework.py#L421) class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. +Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the [`Parameter`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/framework.py#L421) class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. #### High-level API In PaddlePaddle Python API, users will primarily rely on [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) to create neural network layers. Hence, we also need to provide regularization functionality in layer functions. The design of these APIs can be postponed for now. A good reference for these APIs can be found in [Keras](https://keras.io/regularizers/) and also by looking at Tensorflow in [`tf.contrib.layers`](https://www.tensorflow.org/api_guides/python/contrib.layers). A hypothetical sketch of the lazy approach described above follows.
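To make the lazy approach concrete, here is a hypothetical Python sketch (names such as `append_regularization_ops` and the dict-based parameter representation are assumptions for illustration, not the actual PaddlePaddle API) of adding the penalty gradients to each parameter's gradient just before the backward ops are generated:

```python
def append_regularization_ops(params_and_grads):
    """params_and_grads: list of (param, grad) dicts; each param carries its
    regularization attributes, as proposed in 'Storage' above."""
    for param, grad in params_and_grads:
        reg = param.get("regularizer")  # e.g. ("L2", 1e-4), stored on Parameter
        if reg is None:
            continue
        kind, alpha = reg
        if kind == "L2":
            # d/dw of (alpha/2) * ||w||_2^2 is alpha * w
            grad["value"] = [g + alpha * w
                             for g, w in zip(grad["value"], param["value"])]
        elif kind == "L1":
            # d/dw of alpha * ||w||_1 is alpha * sign(w)
            grad["value"] = [g + alpha * ((w > 0) - (w < 0))
                             for g, w in zip(grad["value"], param["value"])]
    return params_and_grads

p = {"value": [1.0, -2.0], "regularizer": ("L2", 0.1)}
g = {"value": [0.5, 0.5]}
append_regularization_ops([(p, g)])
print(g["value"])  # [0.6, 0.3]
```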
diff --git a/doc/fluid/design/motivation/fluid.md b/doc/fluid/design/motivation/fluid.md
index 110b7d78bf12ac8328fb3a913e4386e75d63c995..4b7696cc1bbf57ace72c4d31ffc2bfe6c1071939 100644
--- a/doc/fluid/design/motivation/fluid.md
+++ b/doc/fluid/design/motivation/fluid.md
@@ -10,11 +10,37 @@ Fluid is the answer. Fluid is similar to PyTorch and TensorFlow Eager Execution

Deep learning infrastructure is one of the fastest evolving technologies. Within four years, there have already been three generations of technologies invented.

| Existed since | model as sequence of layers | model as graph of operators | No model |
|--|--|--|--|
| 2013 | Caffe, Theano, Torch, PaddlePaddle | | |
| 2015 | | TensorFlow, MxNet, Caffe2, ONNX, n-graph | |
| 2016 | | | PyTorch, TensorFlow Eager Execution, PaddlePaddle Fluid |
From the above table, we see that deep learning technology is evolving towards getting rid of the concept of a model. To understand the reasons behind this direction, a comparison of the *programming paradigms*, or the ways to program deep learning applications using these systems, would be helpful. The following section goes over these.
@@ -93,7 +119,7 @@ An actual Fluid example is described [here](https://github.com/PaddlePaddle/Pad

From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop.

-We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/if_else_op.md) structure of Fluid.
+We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/execution/if_else_op.md) structure of Fluid.

## Turing Completeness

diff --git a/doc/fluid/design/motivation/index_cn.rst b/doc/fluid/design/motivation/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7706e73eca644ed6db772fd77da947395313237f
--- /dev/null
+++ b/doc/fluid/design/motivation/index_cn.rst
@@ -0,0 +1,10 @@
Design Motivations and Goals
----------------------------

.. toctree::
   :maxdepth: 1

   api.md
   refactorization.md
   fluid.md
   fluid_compiler.md
diff --git a/doc/fluid/design/motivation/index_en.rst b/doc/fluid/design/motivation/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..10b64b257c604ced6b957d6d6018e8a363f00fac
--- /dev/null
+++ b/doc/fluid/design/motivation/index_en.rst
@@ -0,0 +1,10 @@
Design Motivations and Goals
----------------------------

.. toctree::
   :maxdepth: 1

   api.md
   refactorization.md
   fluid.md
   fluid_compiler.md
diff --git a/doc/fluid/design/motivation/refactorization.md b/doc/fluid/design/motivation/refactorization.md
index f93d6155e1764386b01d2f0df3f141ab75cd55d4..f199cc892f5e84f0a12abe3b8e5cace9849e7fa8 100644
--- a/doc/fluid/design/motivation/refactorization.md
+++ b/doc/fluid/design/motivation/refactorization.md
@@ -36,11 +36,37 @@ At compile time, the Python program generates a protobuf message representation

At runtime, the C++ program realizes the graph and runs it.

| | Representation (protobuf messages) | Realization (C++ class objects) |
|---|---|---|
| Data | [VarDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L107) | [Variable](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) |
| Operation | [OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35) | [Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64) |
| Block | BlockDesc | Block |
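To make the compile-time/runtime split above concrete, here is a toy, self-contained sketch in which a protobuf-like description (here a plain dict) is "realized" into executable objects. The names mirror the table, not the real classes:

```python
# Toy sketch of the compile-time/runtime split: a description is realized
# into executable objects via a registry. Names are illustrative only.

OP_REGISTRY = {}

def register_op(op_type):
    def deco(fn):
        OP_REGISTRY[op_type] = fn  # map type name -> kernel/constructor
        return fn
    return deco

@register_op('mul')
def mul(scope, inputs, output):
    scope[output] = scope[inputs[0]] * scope[inputs[1]]

def run_block(block_desc, scope):
    # Realization: walk the OpDescs and execute the registered ops.
    for op_desc in block_desc['ops']:
        OP_REGISTRY[op_desc['type']](scope, op_desc['inputs'], op_desc['output'])

scope = {'x': 3.0, 'y': 4.0}          # variables realized in a Scope
block = {'ops': [{'type': 'mul', 'inputs': ['x', 'y'], 'output': 'z'}]}
run_block(block, scope)
print(scope['z'])  # 12.0
```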
The word *graph* is interchangeable with *block* in this document. A graph consists of computation steps and local variables, similar to a C++/Java program block or a pair of parentheses (`{` and `}`).
@@ -97,13 +123,13 @@ Compile Time -> IR -> Runtime

---
-# Operator/OpWithKernel/OpKernel
+## Operator/OpWithKernel/OpKernel

![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/49caf1fb70820fb4a6c217634317c9306f361f36/op_op_with_kern_class_diagram.dot)

---
-# Operator
+## Operator

![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot)

* `Operator` is the fundamental building block of the user interface.
@@ -113,7 +139,7 @@ Compile Time -> IR -> Runtime

---
-# OpWithKernel/Kernel
+## OpWithKernel/Kernel

![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/9d7f4eba185cf41c8e2fbfb40ae21890dbddcd39/op_with_kernel.dot)

@@ -124,7 +150,7 @@ Compile Time -> IR -> Runtime

---
-# Why separate Kernel and Operator
+## Why separate Kernel and Operator

* Separate GPU and CPU code.
  * Make Paddle capable of running without GPU.
@@ -132,7 +158,7 @@ Compile Time -> IR -> Runtime
* For example, the same multiplication op can have different implementation kernels, such as an FP16 kernel, an FP32 kernel, an MKL kernel, and an Eigen kernel.

---
-# Libraries for Kernel development
+## Libraries for Kernel development

* `Eigen::Tensor` contains basic math and element-wise functions.
  * Note that `Eigen::Tensor` has a broadcast implementation.
@@ -143,16 +169,16 @@ Compile Time -> IR -> Runtime
* Hand-written `GPUKernel` and `CPU` code
  * Do not write in header (`.h`) files. CPU kernels should be in C++ source (`.cc`) files and GPU kernels should be in CUDA (`.cu`) files. (GCC cannot compile GPU code.)

---
-# Operator Registration
+## Operator Registration

-## Why is registration necessary?
+### Why is registration necessary?
We need a method to build mappings between Op type names and Op classes.

-## How is registration implemented?
+### How is registration implemented?
Maintain a map whose key is the type name and whose value is the corresponding Op constructor.

---
-# The Registry Map
+## The Registry Map

### `OpInfoMap`

@@ -166,7 +192,7 @@ Maintain a map whose key is the type name and whose value is the corresponding
- **`checker`**: Used to check attributes.

---
-# Related Concepts
+## Related Concepts

### Op_Maker
Its constructor takes `proto` and `checker`. They are completed during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37))

@@ -178,7 +204,7 @@ REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
```

---
-# Registration Process
+## Registration Process
1. Write an Op class and its gradient Op class, if required.
2. Write an Op maker class. In the constructor of this class, describe the inputs, outputs and attributes of the operator.
3. Invoke the macro `REGISTER_OP`. This macro will
@@ -186,13 +212,13 @@ REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
   2. Using the completed `proto` and `checker`, add a new key-value pair to the `OpInfoMap`.

---
-# Backward Module (1/2)
+## Backward Module (1/2)
### Create Backward Operator
- Mapping from forward Op to backward Op
![backward](https://gist.githubusercontent.com/dzhwinter/a6fbd4623ee76c459f7f94591fd1abf0/raw/61026ab6e518e66bde66a889bc42557a1fccff33/backward.png)

---
-# Backward Module (2/2)
+## Backward Module (2/2)
### Build Backward Network
- **Input**: a graph of forward operators
- **Output**: a graph of backward operators
@@ -205,7 +231,7 @@ REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)

---
-# Scope, Variable, Tensor
+## Scope, Variable, Tensor

* `Tensor` is an n-dimensional array with a type.
  * Only dims and data pointers are stored in `Tensor`.
@@ -218,8 +244,8 @@ REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
* `Scope` has a hierarchical structure. A local scope can get variables from its parent scope.

---
-# Block (in design)
-## the difference between original RNNOp and Block
+## Block (in design)
+### The difference between the original RNNOp and Block
- As an operator, a Block is more intuitive than `RNNOp`,
- Offers a new interface `Eval(targets)` to deduce the minimal block to `Run`,
- Fits the compile-time/runtime separation design paradigm.
  - When the graph executes, a Block with a `BlockDesc` is passed. It then creates `Op` and `Var` instances and invokes `Run`.

---
-# Milestone
+## Milestone
- Take Paddle/books as the main line; the requirements of the models motivate the framework refactoring,
- Model migration
  - Framework development gives **priority support** to model migration, for example,
@@ -240,7 +266,7 @@ REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
- Accept imperfection; concentrate on solving the specific problem at the right price.

---
-# Control the migration quality
+## Control the migration quality
- Compare the performance of migrated models with the old ones.
- Follow the Google C++ style guide.
- Build an automatic workflow for generating Python/C++ documentation.

diff --git a/doc/fluid/design/muti_devices/index_cn.rst b/doc/fluid/design/muti_devices/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1f8439e8623e1c1ae9a12c24d08079f0ec3d761f
--- /dev/null
+++ b/doc/fluid/design/muti_devices/index_cn.rst
@@ -0,0 +1,9 @@
Multi-Device Support
--------------------

.. toctree::
   :maxdepth: 1

   operator_kernel_type.md
   kernel_selection.md
   kernel_hint_design.md
diff --git a/doc/fluid/design/muti_devices/index_en.rst b/doc/fluid/design/muti_devices/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..819e9c5d77b2abf8da0e2ce6f494ea5174c1d0a2
--- /dev/null
+++ b/doc/fluid/design/muti_devices/index_en.rst
@@ -0,0 +1,9 @@
Multi-Device Support
--------------------

.. toctree::
   :maxdepth: 1

   operator_kernel_type.md
   kernel_selection.md
   kernel_hint_design.md
diff --git a/doc/fluid/design/muti_devices/kernel_hint_design.md b/doc/fluid/design/muti_devices/kernel_hint_design.md
index a54b7da045e1a362626ef066f9ebb56af2c3181a..58e44b64169d8c942174de86986403570b271641 100644
--- a/doc/fluid/design/muti_devices/kernel_hint_design.md
+++ b/doc/fluid/design/muti_devices/kernel_hint_design.md
@@ -1,3 +1,5 @@
+# Kernel Hint Design
+
## Problem
In PaddlePaddle's [Design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md), one Operator may have multiple kernels.
Users may have a personal preference for a certain type of kernel for an operator, such as `force_cpu` to choose a CPU kernel or `use_cudnn` to choose a cuDNN kernel, so we need to provide a way for users to express this preference.
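A toy sketch of how such hints could steer kernel choice at runtime; the attribute names (`force_cpu`, `use_cudnn`) follow the examples above, but the registry and dispatch mechanism shown here are illustrative, not Fluid's actual dispatcher:

```python
# Illustrative sketch (not the Paddle API): a per-op hint such as
# `use_cudnn` or `force_cpu` steers kernel selection at runtime.

KERNELS = {
    ('conv2d', 'PLAIN'): lambda: 'conv2d GPU kernel',
    ('conv2d', 'CUDNN'): lambda: 'conv2d cuDNN kernel',
    ('conv2d', 'CPU'):   lambda: 'conv2d CPU kernel',
}

def choose_kernel(op_type, attrs):
    # Hints are ordinary op attributes; selection inspects them first
    # and falls back to the default kernel when no hint is present.
    if attrs.get('force_cpu'):
        return KERNELS[(op_type, 'CPU')]
    if attrs.get('use_cudnn'):
        return KERNELS[(op_type, 'CUDNN')]
    return KERNELS[(op_type, 'PLAIN')]

print(choose_kernel('conv2d', {'use_cudnn': True})())   # conv2d cuDNN kernel
print(choose_kernel('conv2d', {'force_cpu': True})())   # conv2d CPU kernel
```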
diff --git a/doc/fluid/design/muti_devices/kernel_selection.md b/doc/fluid/design/muti_devices/kernel_selection.md
index 9719e031c70979cd95400701efd30879662e19bc..967317d5d2eeb818ab14faabca342cc8c4ed717e 100644
--- a/doc/fluid/design/muti_devices/kernel_selection.md
+++ b/doc/fluid/design/muti_devices/kernel_selection.md
@@ -1,3 +1,5 @@
+# Kernel Selection
+
## Background
Every operator has many kernels because there are multiple data types, places, data layouts, and library types that Fluid supports. We use `OpKernelType` to describe the kernel types that operators can hold.

diff --git a/doc/fluid/design/network/deep_speech_2.md b/doc/fluid/design/network/deep_speech_2.md
index af0c6ef36feba9e0239e7a5f81a8dc9108b2471a..f32a5b7e8a4d820319a666dab4c3129360e2c924 100644
--- a/doc/fluid/design/network/deep_speech_2.md
+++ b/doc/fluid/design/network/deep_speech_2.md
@@ -1,4 +1,4 @@
# DeepSpeech2 on PaddlePaddle: Design Doc

We are planning to build Deep Speech 2 (DS2) \[[1](#references)\], a powerful Automatic Speech Recognition (ASR) engine, on PaddlePaddle. For the first-stage plan, we have the following short-term goals:

@@ -68,11 +68,33 @@ We roughly break down the project into 14 tasks:

Tasks parallelizable within phases:

| Roadmap | Description | Parallelizable Tasks |
| ----------- | :------------------------------------ | :-------------------- |
| Phase I | Simplified model & components | *Task 1* ~ *Task 8* |
| Phase II | Standard model & benchmarking & profiling | *Task 9* ~ *Task 12* |
| Phase III | Documentation | *Task 13* ~ *Task 14* |
An issue for each task will be created later. Contributions, discussions and comments are all highly appreciated and welcome!
@@ -94,7 +116,7 @@ The classical DS2 network contains 15 layers (from bottom to top):

- **One** CTC-loss layer
Figure 1. Architecture of Deep Speech 2 Network.
@@ -102,37 +124,82 @@ We don't have to persist on this 2-3-7-1-1-1 depth \[[2](#references)\]. Similar

Key ingredients about the layers:

- **Data Layers**:
  - Frame sequence data of the audio **spectrogram** (with FFT).
  - Token sequence data of the **transcription** text (labels).
  - These two types of sequences do not have the same lengths, thus a CTC-loss layer is required.
- **2D Convolution Layers**:
  - Not only temporal convolution, but also **frequency convolution**. Like a 2D image convolution, but with a variable dimension (i.e., the temporal dimension).
  - With striding for only the first convolution layer.
  - No pooling for any convolution layer.
- **Uni-directional RNNs**
  - Uni-directional + row convolution: for low-latency inference.
  - Bi-directional + without row convolution: if we don't care about the inference latency.
- **Row convolution**:
  - For looking only a few steps ahead into the features, instead of looking into a whole sequence as in bi-directional RNNs.
  - Not necessary with bi-directional RNNs.
  - "**Row**" means convolutions are done within each frequency dimension (row), and no convolution kernels are shared across rows.
- **Batch Normalization Layers**:
  - Added to all the above layers (except for the data and loss layers).
  - Sequence-wise normalization for RNNs: BatchNorm is only performed on the input-state projection and not the state-state projection, for efficiency considerations.

| Required Components | PaddlePaddle Support | Need to Develop |
| :------------------------------------- | :-------------------------------------- | :----------------------- |
| Data Layer I (Spectrogram) | Not supported yet. | TBD (Task 3) |
| Data Layer II (Transcription) | `paddle.data_type.integer_value_sequence` | - |
| 2D Convolution Layer | `paddle.layer.image_conv_layer` | - |
| DataType Converter (vec2seq) | `paddle.layer.block_expand` | - |
| Bi-/Uni-directional RNNs | `paddle.layer.recurrent_group` | - |
| Row Convolution Layer | Not supported yet. | TBD (Task 4) |
| CTC-loss Layer | `paddle.layer.warp_ctc` | - |
| Batch Normalization Layer | `paddle.layer.batch_norm` | - |
| CTC-Beam search | Not supported yet. | TBD (Task 6) |
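The Row Convolution layer above is listed as TBD; the following toy, framework-free sketch shows the lookahead ("row") convolution it refers to, where each output frame mixes the current frame with a few future frames, per feature row, with no kernels shared across rows. Shapes and weights here are hypothetical, not a Paddle layer:

```python
# Illustrative row (lookahead) convolution: pure Python, toy shapes.

def row_conv(seq, weights):
    """seq: list of T frames, each a list of D features.
    weights: list of (tau + 1) rows, each with D per-row weights."""
    T, D = len(seq), len(seq[0])
    tau = len(weights) - 1  # number of lookahead steps
    out = []
    for t in range(T):
        frame = []
        for d in range(D):
            s = 0.0
            for k in range(tau + 1):
                if t + k < T:  # zero-pad past the end of the sequence
                    s += weights[k][d] * seq[t + k][d]
            frame.append(s)
        out.append(frame)
    return out

# Two frames, two feature rows, lookahead of one step.
print(row_conv([[1.0, 2.0], [3.0, 4.0]], [[1.0, 1.0], [0.5, 0.5]]))
# [[2.5, 4.0], [3.0, 4.0]]
```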
### Row Convolution
@@ -141,18 +208,18 @@ TODO by Assignees

### Beam Search with CTC and LM

Figure 2. Algorithm for CTC Beam Search Decoder.
- The **Beam Search Decoder** for the DS2 CTC-trained network follows a similar approach to \[[3](#references)\], as shown in Figure 2, with two important modifications for the ambiguous parts:
  - 1) in the iterative computation of probabilities, the assignment operation is changed to accumulation, because one prefix may come from different paths;
  - 2) the if condition ```if l^+ not in A_prev then``` after the probabilities' computation is deprecated, for it is hard to understand and seems unnecessary.
- An **external scorer** would be passed into the decoder to evaluate a candidate prefix during decoding whenever a white space is appended in English decoding, or any character is appended in Mandarin decoding.
- Such an external scorer consists of a language model, word count, or any other custom scorers.
- The **language model** is built from Task 5, with parameters that should be carefully tuned to achieve the minimum WER/CER (c.f. Task 7).
- This decoder needs to perform with **high efficiency**, for the convenience of parameter tuning and real-world speech recognition.
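A greatly simplified, runnable illustration of the two modifications above: probabilities of identical prefixes reached via different paths are accumulated rather than overwritten, and an external scorer hook fires when a white space is appended. This is a toy beam search with made-up probabilities, not the full CTC prefix decoder:

```python
# Toy beam search sketch; step_probs and the scorer are hypothetical.

def beam_search(step_probs, beam_size, ext_scorer=None):
    beams = {'': 1.0}  # prefix -> accumulated probability
    for probs in step_probs:  # probs: dict token -> P(token) at this step
        nxt = {}
        for prefix, p in beams.items():
            for tok, tp in probs.items():
                cand = prefix + tok
                score = p * tp
                if tok == ' ' and ext_scorer:
                    score *= ext_scorer(cand)  # e.g., LM or word count
                # accumulate: the same prefix may come from different paths
                nxt[cand] = nxt.get(cand, 0.0) + score
        beams = dict(sorted(nxt.items(), key=lambda kv: -kv[1])[:beam_size])
    return max(beams.items(), key=lambda kv: kv[1])

word_count_scorer = lambda s: 1.0 + 0.1 * s.count(' ')  # toy external scorer
steps = [{'a': 0.6, 'b': 0.4}, {' ': 0.7, 'a': 0.3}]
print(beam_search(steps, beam_size=2, ext_scorer=word_count_scorer))
# ('a ', 0.462)
```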

## Future Work

diff --git a/doc/fluid/design/network/index_cn.rst b/doc/fluid/design/network/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3557d55fe4dbae1f712e0760ca15111ec6f6792d
--- /dev/null
+++ b/doc/fluid/design/network/index_cn.rst
@@ -0,0 +1,7 @@
Complex Network Design
----------------------

.. toctree::
   :maxdepth: 1

   sequence_decoder.md
diff --git a/doc/fluid/design/network/index_en.rst b/doc/fluid/design/network/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..73a7137236bdf0548d35721609351d6deca3013b
--- /dev/null
+++ b/doc/fluid/design/network/index_en.rst
@@ -0,0 +1,7 @@
Complex Network Design
----------------------

.. toctree::
   :maxdepth: 1

   sequence_decoder.md
diff --git a/doc/fluid/design/network/sequence_decoder.md b/doc/fluid/design/network/sequence_decoder.md
index c4a9bbeeefca0e05c335dd60233691e8bac33015..f13d30ca9fe09c9525c711436f605bb280e11000 100644
--- a/doc/fluid/design/network/sequence_decoder.md
+++ b/doc/fluid/design/network/sequence_decoder.md
@@ -199,7 +199,7 @@ Packing the `selected_generation_scores` will get a `LoDTensor`, and each tail is

## LoD and shape changes during decoding

The only phase that changes the LoD is beam search.

diff --git a/doc/fluid/design/others/gan_api.md b/doc/fluid/design/others/gan_api.md
index fb41df8615f73d9fd4c32995eab265833eac1a55..7167470088766985fa5ad31657410309330fd725 100644
--- a/doc/fluid/design/others/gan_api.md
+++ b/doc/fluid/design/others/gan_api.md
@@ -1,24 +1,24 @@
# Design for GAN

GAN (Generative Adversarial Networks [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and is widely used in many areas.
It applies several important concepts in machine learning system design, including building and running subgraphs, dependency tracing, different optimizers in one executor, and so forth.
In our GAN design, we wrap it as a user-friendly, easily customized Python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example, due to its good performance on image generation.

Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.

The operators, layers and functions required/optional to build a GAN demo are summarized in https://github.com/PaddlePaddle/Paddle/issues/4563.

Figure 2. Photo borrowed from the original DC-GAN paper.

## The Conditional-GAN might be a class
In this design, we adopt the popular open-source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains the following data structure:

- DCGAN(object): contains everything required to build a GAN model. It provides the following member functions as an API:
@@ -29,7 +29,7 @@
Returns a generated image.

- discriminator(image):
Given an image, decide whether it is from a real source or a fake one.
Returns a 0/1 binary label.

- build_model(self):
@@ -47,7 +47,7 @@ To be more detailed, we introduce our design of DCGAN as follows:

```python
class DCGAN(object):
    def __init__(self, y_dim=None):

        # hyper parameters
        self.y_dim = y_dim # conditional gan or not
        self.batch_size = 100
@@ -82,18 +82,18 @@ class DCGAN(object):
        # input z: the random noise
        # input y: input data label (optional)
        # output G_im: generated fake images

        if self.y_dim:  # conditional GAN: concatenate the label with the noise
            z = pd.layer.concat(1, [z, y])

        G_h0 = pd.layer.fc(z, self.G_w0, self.G_b0)
        G_h0_bn = pd.layer.batch_norm(G_h0)
        G_h0_relu = pd.layer.relu(G_h0_bn)

        G_h1 = pd.layer.deconv(G_h0_relu, self.G_w1, self.G_b1)
        G_h1_bn = pd.layer.batch_norm(G_h1)
        G_h1_relu = pd.layer.relu(G_h1_bn)

        G_h2 = pd.layer.deconv(G_h1_relu, self.G_W2, self.G_b2)
        G_im = pd.layer.tanh(G_h2)
        return G_im
@@ -111,11 +111,11 @@ class DCGAN(object):
        D_h0 = pd.layer.conv2d(image, w=self.D_w0, b=self.D_b0)
        D_h0_bn = pd.layer.batchnorm(D_h0)
        D_h0_relu = pd.layer.lrelu(D_h0_bn)

        D_h1 = pd.layer.conv2d(D_h0_relu, w=self.D_w1, b=self.D_b1)
        D_h1_bn = pd.layer.batchnorm(D_h1)
        D_h1_relu = pd.layer.lrelu(D_h1_bn)

        D_h2 = pd.layer.fc(D_h1_relu, w=self.D_w2, b=self.D_b2)
        return D_h2
```

@@ -123,7 +123,7 @@ class DCGAN(object):
### Class member function: Build the model
- Define data readers as placeholders to hold the data;
- Build the generator and discriminator;
- Define two training losses, for the discriminator and the generator respectively.
If we have an execution dependency engine to back-trace all tensors, the module building our GAN model will look like this:
```python
class DCGAN(object):
    def build_model(self):
        if self.y_dim:
            self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
        self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
        self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
        self.z = pd.data(pd.float32, [None, self.z_size])

        # step 1: generate images with the generator, classify real/fake images with the discriminator
        if self.y_dim: # if conditional GAN, include the label
            self.G = self.generator(self.z, self.y)
            self.D_t = self.discriminator(self.images)
        # generate fake images
        self.sampled = self.sampler(self.z)
        self.D_f = self.discriminator(self.faked_images)  # classify the fake images

        # step 2: define the two losses
        self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
        self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
```

@@ -176,7 +176,7 @@ class DCGAN(object):
        self.G = self.generator(self.z)
        self.D_g = self.discriminator(self.G, self.y)
        self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_g, np.ones(self.batch_size)))

    with pd.default_block().d_block():
        if self.y_dim: # if conditional GAN, include the label
            self.D_t = self.discriminator(self.images, self.y)

@@ -217,7 +217,7 @@
if __name__ == "__main__":
    # load mnist data
    data_X, data_y = self.load_mnist()

    # Two subgraphs required!!!
    with pd.block().d_block():
        d_optim = pd.train.Adam(lr=.001, beta=.1)
        d_step = d_optim.minimize(self.d_loss)

@@ -228,7 +228,7 @@
    # executor
    sess = pd.executor()

    # training
    for epoch in xrange(10000):
        for batch_id in range(N / batch_size):

            batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
            if batch_id % 2 == 0:
                sess.run(d_step,
                         feed_dict={dcgan.images: batch_im, dcgan.y: batch_label, dcgan.z: batch_z})

diff --git a/doc/fluid/dev/api_doc_std_cn.md b/doc/fluid/dev/api_doc_std_cn.md
index 5596b2653ae6ed9917f77dad08f926bcb1fb3419..b50f18f21df0787b9761bf0935ed7f4384ff0f98 100644
--- a/doc/fluid/dev/api_doc_std_cn.md
+++ b/doc/fluid/dev/api_doc_std_cn.md
@@ -45,11 +45,11 @@ API docs must be written in reStructuredText format; see the linked reference for details.

- Python API Definition
  - Format:

    [Python API Definition]

  - Example

    ```
    fc(input,
       size,
@@ -63,19 +63,19 @@
    ```

- Function Description
  - Format
    This part should contain the following, in writing order:

    [Function Description]

    [Formula]

    [Symbols' Descriptions if necessary]

    [References if necessary]

  - Example

    [Function Description]
@@ -119,18 +119,18 @@
    [References if necessary]

    Since fc has no references that need listing, this part is omitted. In other cases, explicit references and the corresponding links must be given; take layer_norm as an example:

    ```
    Refer to `Layer Normalization `_ for more details.
    ```

- Args Description
  - Format

    \[Arg's Name\][(Data Type, Default Value)][Description]

  - Example
    Some of fc's argument comments are as follows:
@@ -145,35 +145,35 @@
    ```

- Returns
  - Format

    [Name][Shape]

  - Example

    ```
    Returns:
        A tensor variable storing the transformation result.
    ```

    When the returned value is a tuple containing multiple parameters, introduce each parameter in order; take dynamic_lstm as an example:

    ```
    Returns:
        A tuple containing:
          The hidden state of LSTM whose shape is (T X D).
          The cell state of LSTM whose shape is (T X D).
    ```

- Raises
  - Format

    [Exception Type][Condition]

  - Example

    ```
    Raises:
        ValueError: If the rank of the input is less than 2.
@@ -182,7 +182,7 @@ API文档须使用reStructuredText格式撰写,该格式详情请参考[链接 - Note - 格式 - + [Note] - 示例 @@ -198,15 +198,15 @@ API文档须使用reStructuredText格式撰写,该格式详情请参考[链接 2. When num_heads == 1, scaled_dot_product_attention has no learnable parameters. ``` - + - Examples - 格式 \[Python Code Snipper] - + - 示例 - + ``` Examples: .. code-block:: python diff --git a/doc/fluid/dev/api_doc_std_en.md b/doc/fluid/dev/api_doc_std_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e57072d52fd162e92a3482aef33f99ab9394c532 --- /dev/null +++ b/doc/fluid/dev/api_doc_std_en.md @@ -0,0 +1,226 @@ +# API Doc Standard + +- [API Doc Structure](#API Doc Structure) +- [Format and Examples](#Format and Examples) +- [Complete Example](#Complete Example) + + +## API Doc Structure + +API Doc should contain the following parts(please write them in order): + +- Python API Definition + + The definition of API + +- Function Description + + Description of API's function. + The description includes: meaning, purpose and operation on input of API, reference and corresponding link(if any), formula(if necessary) and explanations of key variables in the formula. + +- Args Description + + Description of API parameters. + Introduce parameters one by one according to the order in API definition. + The introduction includes: data type, default value(if any), meaning, etc. + +- Returns + + Introduction of API returned value. + Introduce meaning of returned value, provide correspoding format if necessary. + If returned value is a tuple containing multiple parameters, then introduce parameters one by one in order. + +- Raises(if any) + + Abnormality, error that may occur, and possible reasons. If there are more than one possible abnormity or error, they should be listed in order. + +- Note(if any) + + Matters needing attention. If there are more than one matters, they should be listed in order. + +- Examples + + Examples of how to use API. + + +## Format and Examples + +API documentation must obey reStructuredText format, please refer to [here](http://sphinx-doc-zh.readthedocs.io/en/latest/rest.html). +Format and examples of each part of API documantation are as follows: (take fc for example) + +- Python API Definition + + - Format + + [Python API Definition] + + - Example + + ``` + fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None, + main_program=None, + startup_program=None) + ``` + +- Function Description + + - Format + + This part contains (please write them in order): + + [Function Description] + + [Formula] + + [Symbols' Descriptions if necessary] + + [References if necessary] + + - Example + + [Function Description] + + ``` + **Fully Connected Layer** + + The fully connected layer can take multiple tensors as its inputs. It + creates a variable called weights for each input tensor, which represents + a fully connected weight matrix from each input unit to each output unit. + The fully connected layer multiplies each input tensor with its coresponding + weight to produce an output Tensor. If multiple input tensors are given, + the results of multiple multiplications will be sumed up. If bias_attr is + not None, a bias variable will be created and added to the output. Finally, + if activation is not None, it will be applied to the output as well. + ``` + + [Formula] + + ``` + This process can be formulated as follows: + + .. 
math:: + + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) + ``` + + [Symbols' Descriptions if necessary] + + ``` + In the above equation: + + * :math:`N`: Number of the input. + * :math:`X_i`: The input tensor. + * :math:`W`: The weights created by this layer. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation function. + * :math:`Out`: The output tensor. + ``` + + [References if necessary] + + Since there is no need for reference of fc, we omit them here. Under other circumstances, please provide explicit reference and link, take layer_norm for example: + + ``` + Refer to `Layer Normalization `_ for more details. + ``` + + +- Args Description + + - Format + + \[Arg's Name\][(Data Type, Default Value)][Description] + + - Example + + part of fc parameters are as follows: + + ``` + Args: + input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of + the input tensor(s) is at least 2. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + name (str, default None): The name of this layer. + ``` + +- Returns + + - Format + + [Name][Shape] + + - Example + + ``` + Returns: + A tensor variable storing the transformation result. + ``` + + when returned value contain more than one tuple, please introduce every parameter in order, take dynamic_lstm for example: + + ``` + Returns: + A tuple containing: + The hidden state of LSTM whose shape is (T X D). + The cell state of LSTM whose shape is (T X D). + ``` + +- Raises + + - Format + + [Exception Type][Condition] + + - Example + + ``` + Raises: + ValueError: If the rank of the input is less than 2. + ``` + +- Note + + - Format + + [Note] + + - Example + + there is no Note in fc, so we omit this part. If there is any note, please write clearly. If there are more than one notes, please list them in order. Take scaled\_dot\_product\_attention for example: + + ``` + Note: + 1. When num_heads > 1, three linear projections are learned respectively + to map input queries, keys and values into queries', keys' and values'. + queries', keys' and values' have the same shapes with queries, keys + and values. + 2. When num_heads == 1, scaled_dot_product_attention has no learnable + parameters. + ``` + +- Examples + + - Format + + \[Python Code Snipper] + + - Example + + ``` + Examples: + .. code-block:: python + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + ``` + +## Complete Example + +Complete Example of fc please see [here](src/fc.py)。 diff --git a/doc/fluid/dev/index_cn.rst b/doc/fluid/dev/index_cn.rst index e1edf079fa0f85eb7f6709fd945fffae88625d01..ad798003f560e7fb0e6db6083fdd152fd3417584 100644 --- a/doc/fluid/dev/index_cn.rst +++ b/doc/fluid/dev/index_cn.rst @@ -1,2 +1,14 @@ 开发标准 ------------ + +.. toctree:: + :maxdepth: 1 + + api_doc_std_cn.md + new_op_cn.md + new_op_kernel.md + use_eigen_cn.md + name_convention.md + support_new_device.md + releasing_process_cn.md + op_markdown_format.md diff --git a/doc/fluid/dev/index_en.rst b/doc/fluid/dev/index_en.rst index faf9dfcd315fddc4774c3717b41086fa6c6bf85a..80c899a82fa452c5cd8f38dad89c15d3041b09e3 100644 --- a/doc/fluid/dev/index_en.rst +++ b/doc/fluid/dev/index_en.rst @@ -1,4 +1,14 @@ Development ------------ -This is Development page +.. 
toctree::
   :maxdepth: 1

   api_doc_std_en.md
   new_op_en.md
   new_op_kernel.md
   use_eigen_en.md
   name_convention.md
   support_new_device.md
   releasing_process_en.md
   op_markdown_format.md
diff --git a/doc/fluid/dev/name_convention.md b/doc/fluid/dev/name_convention.md
index a02b356f058da68442516c2705d0bac140f8ef18..75830ef28c67dc4694d899efe503084b7b5852e1 100644
--- a/doc/fluid/dev/name_convention.md
+++ b/doc/fluid/dev/name_convention.md
@@ -1,8 +1,8 @@
-## Operator's Parameter Name Convention
+# Operator's Parameter Name Convention

To make the operator documentation clearer, we recommend that operator names obey the conventions listed below.

-### OpProtoMaker names
+## OpProtoMaker names

When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker doc) needs to be defined. All the Inputs/Outputs and Attributes will be written into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61), and will be used in the client language to create operators.

@@ -20,7 +20,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith
- Order.
  - Follow the order of Input/Output, then Attribute, then Comments. See the example in the best practice.

-### Best Practice
+## Best Practice

Here we give some examples to show how these rules are used.

diff --git a/doc/fluid/dev/new_op_cn.md b/doc/fluid/dev/new_op_cn.md
index 92996585674b46f45549b972b9f295503b1c7f8c..0c3f88d9c31e05bec399c64bf6ade56e62e01f68 100644
--- a/doc/fluid/dev/new_op_cn.md
+++ b/doc/fluid/dev/new_op_cn.md
@@ -26,13 +26,32 @@

Depending on whether it contains a kernel, an Op falls into one of two kinds: an Op with a kernel inherits from `OperatorWithKernel`, and one without inherits from `OperatorBase`. This tutorial mainly covers how to write an Op with a kernel. In brief, the content an Op needs to include is summarized below:

| Content | Where it is defined |
| --- | --- |
| OpProtoMaker definition | `.cc` file; a backward Op does not need an OpProtoMaker |
| Op definition | `.cc` file |
| Kernel implementation | A kernel shared by CPU and CUDA lives in a `.h` file; otherwise, the CPU implementation goes in a `.cc` file and the CUDA implementation in a `.cu` file. |
| Op registration | Op registration goes in a `.cc` file; CPU kernel registration goes in a `.cc` file, CUDA kernel registration in a `.cu` file |
New op implementations are added under the directory [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators), with file names ending in `*_op.h` (if any), `*_op.cc`, and `*_op.cu` (if any). **The system automatically builds the op and its corresponding Python extension based on the file names.**

diff --git a/doc/fluid/dev/new_op_en.md b/doc/fluid/dev/new_op_en.md
index da8b1bdd1082e439456daf25e9b3a1e8eb534375..a566a09131f86251b70d5435d0a483aa2a705b35 100644
--- a/doc/fluid/dev/new_op_en.md
+++ b/doc/fluid/dev/new_op_en.md
@@ -33,6 +33,33 @@

| Information | Where it is defined |
| --- | --- |
| OpProtoMaker definition | `.cc` files; a backward Op does not need an OpProtoMaker interface. |
| Op definition | `.cc` files |
| Kernel implementation | The kernel methods shared between CPU and CUDA are defined in `.h` files. CPU-specific kernels live in `.cc` files, while CUDA-specific kernels are implemented in `.cu` files. |
| Registering the Op | Ops are registered in `.cc` files; for kernel registration, `.cc` files contain the CPU implementation, while `.cu` files contain the CUDA implementation. |
New Operator implementations are added to the list [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators), with file names in the format `*_op.h` (if applicable), `*_op.cc`, `*_op.cu` (if applicable). **The system will use the naming scheme to automatically build operators and their corresponding Python extensions.**

@@ -279,7 +306,7 @@ A forward operator unit test inherits `unittest.TestCase` and defines metaclass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

diff --git a/doc/fluid/dev/new_op_kernel_en.md b/doc/fluid/dev/new_op_kernel.md
similarity index 88%
rename from doc/fluid/dev/new_op_kernel_en.md
rename to doc/fluid/dev/new_op_kernel.md
index 123df0a7ee4943c0b789ef9cfa6e0804d0fdd564..55dea8d0a39232ede59d4663d6e1a47fbfc60853 100644
--- a/doc/fluid/dev/new_op_kernel_en.md
+++ b/doc/fluid/dev/new_op_kernel.md
@@ -1,14 +1,14 @@
-## Add Kernels for a New Device
+# Add Kernels for a New Device

-### Background
+## Background
PaddlePaddle Fluid has hundreds of operators. Each operator could have one or more kernels. A kernel is an implementation of the operator for a certain device, which could be a hardware device, e.g., the CUDA GPU, or a library that utilizes a device, e.g., Intel MKL, which makes full use of the Xeon CPU.

[This document](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_en.md) explains how to add an operator and its kernels. The kernels of an operator are indexed by a C++ type, [`OpKernelType`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/operator_kernel_type.md). An operator chooses the right kernel at runtime. This choosing mechanism is described [here](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md).

-### Write Kernels for A New Device
+## Write Kernels for A New Device

-#### Add A New Device
+### Add A New Device

For some historical reasons, we misuse the word *library* for *device*. For example, we call the device type the *library type*. An example is the header file [`library_type.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/library_type.h#L24). We will correct this ASAP.

@@ -23,7 +23,7 @@
enum class LibraryType {
```

-#### Add A New [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L53)
+### Add A New [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L53)

If you have a new kind of Device, you first need to add a new kind of [`Place`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L53). For example, `CUDAPlace`:

@@ -45,7 +45,7 @@ struct CUDAPlace {
typedef boost::variant<CUDAPlace, CPUPlace> Place;
```

-#### Add [device context](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h#L37)
+### Add [device context](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h#L37)
After a new kind of Device is added, you should add a corresponding [DeviceContext](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h#L37) for it.

```cpp
class DeviceContext {
};
```

-#### Implement new [OpKernel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L351) for your Device.
+### Implement new [OpKernel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L351) for your Device.
Detailed documentation can be found in [`new_op_and_kernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_en.md).

@@ -85,7 +85,7 @@
class OpKernel : public OpKernelBase {
```

-#### Register the OpKernel to framework
+### Register the OpKernel to framework

After writing the components described above, we should register the kernel to the framework.

@@ -107,7 +107,7 @@ take [`conv2d`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/oper
    REGISTER_OP_KERNEL(conv2d, CPU, paddle::platform::CPUPlace,
        paddle::operators::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
        paddle::operators::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

    REGISTER_OP_KERNEL(conv2d, CUDNN, ::paddle::platform::CUDAPlace,
        paddle::operators::CUDNNConvOpKernel<float>,
        paddle::operators::CUDNNConvOpKernel<double>);

diff --git a/doc/fluid/dev/op_markdown_format.md b/doc/fluid/dev/op_markdown_format.md
index 0ee804d592252c727622cbe59b0644813db3c4fd..4e539d7992e5f67ee7b07193b59b6b425b73c9e5 100644
--- a/doc/fluid/dev/op_markdown_format.md
+++ b/doc/fluid/dev/op_markdown_format.md
@@ -15,26 +15,26 @@ The signature of the operator.

Each section mentioned above has been covered in further detail in the rest of the document.

-# PaddlePaddle Operator Name
+## PaddlePaddle Operator Name

This should be in all small letters; in case of multiple words, we separate them with an underscore. For example: `array to lod tensor` should be written as `array_to_lod_tensor`. This naming convention should be standard across all PaddlePaddle operators.

-# Standard Operator Name
+## Standard Operator Name

This is the standard name of the operator as used in the community. The general standard is usually:
- Standard abbreviations like `SGD` are written in all capital letters.
- Operator names that have multiple words inside a single word use `camelCase` (capitalize word boundaries inside of a word).
- Keep numbers inside a word as is, with no boundary delimiters.
- Follow the name of the operator with the keyword: `Activation Operator.`

-# Operator description
+## Operator description

This section should contain the description of what the operator does, including the operation performed, the literature it comes from and was first introduced in, and other important details. The relevant paper/article, including a hyperlink, should be cited in this section.

-# LaTeX equation
+## LaTeX equation

This section should contain an overall equation of the update or operation that the operator performs. The variables used in the equation should follow the naming convention of operators as described [here](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/name_convention.md). Two words in the same word should be separated by an underscore (`_`).

-# The signature
+## The signature

This section describes the signature of the operator: a list of Inputs and Outputs, each of which has a small description of what the variable represents and the type of the variable. The variable names follow the `CamelCase` naming convention.
The proposed format for this is:
`Section : VariableName : (VariableType) VariableDescription`

diff --git a/doc/fluid/dev/releasing_process.md b/doc/fluid/dev/releasing_process_cn.md
similarity index 58%
rename from doc/fluid/dev/releasing_process.md
rename to doc/fluid/dev/releasing_process_cn.md
index b9787261092f1f27377886152cb1596d9ff54188..4c6728fba7150b0f1e180e57590f18a5b677c70d 100644
--- a/doc/fluid/dev/releasing_process.md
+++ b/doc/fluid/dev/releasing_process_cn.md
@@ -10,19 +10,10 @@ Each time PaddlePaddle releases a new version, the following process is followed:

  * Use the Regression Test List as a checklist to test the correctness of this release.
    * If a test fails, record all failing cases, fix all bugs on the `release/[version]` branch, increase the patch number, and go back to step 2.
  * Update the version information in `python/setup.py.in` and set the `istaged` field to `True`.
-  * Build this version's Python wheel packages and publish them to pypi.
-    * Since pypi.python.org follows the strict naming convention [PEP 513](https://www.python.org/dev/peps/pep-0513), the platform-related suffix of the wheel package must be renamed before uploading with twine, e.g. change `linux_x86_64` to `manylinux1_x86_64`.
-    * The package names on pypi are paddlepaddle and paddlepaddle_gpu. To upload a GPU package, change the name to "paddlepaddle_gpu" in build/python/setup.py and rebuild the wheel: `python setup.py bdist_wheel`.
-    * How to upload:
-      ```
-      cd build/python
-      pip install twine
-      twine upload dist/[package to upload]
-      ```
-  * Build this version's Docker release images and publish them to DockerHub. If this fails, fix the Docker image build, increase the patch number, and return to step 2.
-1. After step 3, merge the `release/[version]` branch into master and delete it. Tag the merge commit on master with `[version]`. Also merge `master` back into `develop`.
-1. Collaborate on writing the Release Note
+  * Publish this version's Python wheel packages to pypi.
+  * Update the Docker images (see the operational details below).
+1. After step 3, merge the `release/[version]` branch into master, tag the merge commit on master with `[version]`, and merge `master` back into `develop`.
+1. Collaborate on writing the Release Note.

Note the following:

@@ -31,13 +22,18 @@

## Publishing wheel packages to pypi

1. Use [PaddlePaddle CI](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) to finish the automated binary builds. As shown in the figure below, choose the versions to release (usually one CPU version and one GPU version), click the "..." button to the right of "run" to open the dialog, choose the branch to release in the second tab (Changes), e.g. 0.11.0, and click the "Run Build" button.
1. After the builds finish, the three generated binaries, corresponding to the CAPI, `cp27m`, and `cp27mu` versions, can be found in the "Artifacts" drop-down on that page.
1. Since pypi.python.org follows the strict naming convention [PEP 513](https://www.python.org/dev/peps/pep-0513), the platform-related suffix of the wheel package must be renamed before uploading with twine, e.g. change `linux_x86_64` to `manylinux1_x86_64`.
1. Upload:
```
cd build/python
pip install twine
twine upload dist/[package to upload]
```

* Note: the CI environment uses the Docker images from https://github.com/PaddlePaddle/buildtools as the build environment, to support more Linux distributions; these images can also be used for manual builds, and can be downloaded from https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/.

@@ -48,10 +44,20 @@

After the PaddlePaddle CI above finishes building the wheels, the Docker images are pushed to DockerHub automatically, so publishing the Docker images only requires tagging the automatically pushed images with the version number:

-1. Go to https://hub.docker.com/r/paddlepaddle/paddle/tags/ and check whether the latest tag was updated after the wheel builds above finished.
-1. Run `docker pull paddlepaddle/paddle:[latest tag]`; the latest tag can be latest, latest-gpu, etc.
-1. Run `docker tag paddlepaddle/paddle:[latest tag] paddlepaddle/paddle:[version]`
-1. Run `docker push paddlepaddle/paddle:[version]`
+```
+docker pull [image]:latest
+docker tag [image]:latest [image]:[version]
+docker push [image]:[version]
+```
+
+The image tags that need updating include:
+
+* `[version]`: CPU version
+* `[version]-openblas`: OpenBLAS version
+* `[version]-gpu`: GPU version (CUDA 8.0, cuDNN 5)
+* `[version]-gpu-[cudaver]-[cudnnver]`: images for other CUDA/cuDNN versions
+
+Afterwards, go to https://hub.docker.com/r/paddlepaddle/paddle/tags/ to check that the release succeeded.

## PaddlePaddle branching conventions

PaddlePaddle development uses [git-flow](http://nvie.com/posts/a-successful-git-branching-model/), with some modifications:
@@ -66,7 +72,7 @@
* Recommendation: a developer fork's `develop` branch syncs with the main repository's `develop` branch.
* Recommendation: in a developer's fork, feature branches are forked from the `develop` branch.
  * When a feature branch is finished, submit a `Pull Request` to the main PaddlePaddle repository for code review.
  * During the review, developers can fix their code by pushing further commits to the feature branch.
* Bug-fix branches are also maintained in the developer's own fork. Unlike feature branches, a bug-fix branch must submit `Pull Request`s to the main repository's `master`, `develop`, and any existing `release/[version]` branches at the same time.

@@ -76,15 +82,118 @@

## PaddlePaddle Regression Test List

### All chapters of the PaddlePaddle Book

Before each release, the correctness of all chapters of the PaddlePaddle Book must be guaranteed first. Correctness covers verifying both `paddle_trainer` training and pure-`Python` training (V2 and Fluid) of the models.

| | Getting Started | Recognize Digits | Image Classification | Word2Vec | Sentiment Analysis | Semantic Role Labeling | Machine Translation | Personalized Recommendation |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| API.V2 + Docker + GPU | | | | | | | | |
| API.V2 + Docker + CPU | | | | | | | | |
| `paddle_trainer` + Docker + GPU | | | | | | | | |
| `paddle_trainer` + Docker + CPU | | | | | | | | |
| API.V2 + Ubuntu + GPU | | | | | | | | |
| API.V2 + Ubuntu + CPU | | | | | | | | |
| `paddle_trainer` + Ubuntu + GPU | | | | | | | | |
| `paddle_trainer` + Ubuntu + CPU | | | | | | | | |
diff --git a/doc/fluid/dev/releasing_process_en.md b/doc/fluid/dev/releasing_process_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..f989b964d6d1a329bbe31adc7ec10db017acaefa
--- /dev/null
+++ b/doc/fluid/dev/releasing_process_en.md
@@ -0,0 +1,210 @@
# PaddlePaddle Releasing Process

PaddlePaddle manages its branches using the "git-flow branching model", and uses [Semantic Versioning](http://semver.org/) as its version-numbering semantics.

Each time we release a new PaddlePaddle version, we should follow the steps below:

1. Fork a new branch from `develop` named `release/[version]`, e.g. `release/0.10.0`.
1. Push a new tag on the release branch; the tag name should be like `[version]rc.patch`. The first tag should be `0.10.0rc1`, the second `0.10.0rc2`, and so on.
1. After that, we should:
   * Run all regression tests on the Regression Test List (see PaddlePaddle TeamCity CI) to confirm that this release has no major bugs.
     * If a regression test fails, we must fix those bugs and create a new `release/[version]` branch from the previous release branch.
   * Modify `python/setup.py.in`: change the version number and change `ISTAGED` to `True`.
   * Publish the PaddlePaddle release wheel packages to pypi (see below instructions for detail).
   * Update the Docker images (see below instructions for detail).
1. After the above steps, merge the `release/[version]` branch to master, push a tag on the master commit, then merge `master` to `develop`.
1. Update the Release Note.

***NOTE:***

* Do ***NOT*** merge commits from the develop branch to release branches, so that each release branch contains features only for the current release and we can test on that version.
* If we want to fix bugs on release branches, we must merge the fix to master, develop, and the release branch.

## Publish Wheel Packages to pypi

1. Use our [CI tool](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) to build all wheel packages needed for publishing. As shown in the following picture, choose a build version, click the "..." button on the right side of the "Run" button, switch to the second tab in the pop-up box, choose the current release branch, and click the "Run Build" button. You may repeat this step to start builds of different versions.
1. After the build succeeds, download the outputs under "Artifacts", including capi, `cp27m` and `cp27mu`.
1. Since pypi.python.org follows [PEP 513](https://www.python.org/dev/peps/pep-0513), before we upload the package using `twine`, we need to rename the package from `linux_x86_64` to `manylinux1_x86_64`.
1. Start the upload:
   ```
   cd build/python
   pip install twine
   twine upload dist/[package to upload]
   ```

* NOTE: We use a special Docker image to build our releases, to support more Linux distributions; you can download it from https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/, or build it using the scripts under `tools/manylinux1`.
* pypi does not allow overwriting an already-uploaded version of a wheel package, even if you delete the old version; you must change the version number before uploading a new one.
## Publish Docker Images

Our CI tool will push the latest images to DockerHub, so we only need to push a version tag, like:

```
docker pull [image]:latest
docker tag [image]:latest [image]:[version]
docker push [image]:[version]
```

Tags that need to be updated are:

* `[version]`: CPU-only version image
* `[version]-openblas`: OpenBLAS version image
* `[version]-gpu`: GPU version (using CUDA 8.0 and cuDNN 5)
* `[version]-gpu-[cudaver]-[cudnnver]`: tags for different CUDA and cuDNN versions

You can then check out the latest pushed tags at https://hub.docker.com/r/paddlepaddle/paddle/tags/.

## Branching Model

We use [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) as our branching model, with some modifications:

* The `master` branch is the stable branch. Each version on the master branch is tested and guaranteed.
* The `develop` branch is for development. Each commit on the develop branch has passed CI unit tests, but no regression tests are run.
* The `release/[version]` branch is used to publish each release. The latest release-version branches receive bug fixes only for that version, but no feature updates.
* Developer forks are not required to follow the [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) branching model; all forks are like feature branches.
  * Advice: a developer fork's develop branch is used to sync up with the main repo's develop branch.
  * Advice: developers use their fork's develop branch to fork new branches and start developing.
  * Use that branch on the developer's fork to create pull requests and start reviews.
    * Developers can push new commits to that branch while the pull request is open.
* Bug fixes are also started from the developer's forked repo, and bug-fix branches can be merged to `master`, `develop` and `releases`.

## PaddlePaddle Regression Test List

### All Chapters of PaddlePaddle Book

We need to guarantee that all the chapters of the PaddlePaddle Book run correctly, including V1 (`paddle_trainer`) training, V2 training, and Fluid training.

| | Linear Regression | Recognize Digits | Image Classification | Word2Vec | Personalized Recommendation | Sentiment Analysis | Semantic Role Labeling | Machine Translation |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| API.V2 + Docker + GPU | | | | | | | | |
| API.V2 + Docker + CPU | | | | | | | | |
| `paddle_trainer` + Docker + GPU | | | | | | | | |
| `paddle_trainer` + Docker + CPU | | | | | | | | |
| API.V2 + Ubuntu + GPU | | | | | | | | |
| API.V2 + Ubuntu + CPU | | | | | | | | |
| `paddle_trainer` + Ubuntu + GPU | | | | | | | | |
| `paddle_trainer` + Ubuntu + CPU | | | | | | | | |
diff --git a/doc/fluid/dev/use_eigen_cn.md b/doc/fluid/dev/use_eigen_cn.md index f36843b4408c21bdca1fa83853e5b0a40116791c..75922e7d85a13e53ce94619a48d8da8b960e6c9a 100644 --- a/doc/fluid/dev/use_eigen_cn.md +++ b/doc/fluid/dev/use_eigen_cn.md @@ -1,16 +1,16 @@ -## 在Paddle中如何使用Eigen +# 在Paddle中如何使用Eigen 神经网络本质上是一个计算图,计算需要的数据存放在`Tensor`中,而计算过程是由`Operartor`来描述的。在执行时,`Operator`调用对应`OpKernel`中的`Compute`接口,实现对`Tensor`的操作。 -### Eigen Tensor模块 +## Eigen Tensor模块 Eigen Tensor模块对element-wise计算提供了强大的支持,并且书写一份代码,可以同时在CPU、GPU执行。但Eigen Tensor是一个正在开发中的模块,因此可能测试不够完备,文档较少。 关于Eigen Tensor模块的详细介绍请参考[文档1](https://github.com/RLovelett/eigen/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md) 和[文档2](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md) -### paddle::framework::Tensor +## paddle::framework::Tensor Paddle Tensor定义在framework目录下,其主要接口如下: @@ -20,14 +20,14 @@ class Tensor { /*! Return a pointer to mutable memory block. */ template inline T* data(); - + /** * @brief Return a pointer to mutable memory block. * @note If not exist, then allocation. */ template inline T* mutable_data(platform::Place place); - + /** * @brief Return a pointer to mutable memory block. * @@ -38,17 +38,17 @@ class Tensor { */ template inline T* mutable_data(DDim dims, platform::Place place); - + /*! Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); - + /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; private: /*! holds the memory block if allocated. */ std::shared_ptr holder_; - + /*! points to dimensions of memory block. */ DDim dim_; }; @@ -129,7 +129,7 @@ From是EigenTensor模板提供的一个接口,可以实现从paddle::framework -### 实现计算 +## 实现计算 当需要完成计算时,我们需要等式左边的EigenTensor调用device接口。在这里需要注意的是,这里的EigenTensor之间的运算只是改变了原有Tensor中的数据,而不会改变原有Tensor的shape信息。 diff --git a/doc/fluid/dev/use_eigen_en.md b/doc/fluid/dev/use_eigen_en.md index 3a466f73d1f9b94a29b171015279c782ca50bd02..3313d097cb21e40c23aa13187b6a50562f12403a 100644 --- a/doc/fluid/dev/use_eigen_en.md +++ b/doc/fluid/dev/use_eigen_en.md @@ -1,9 +1,9 @@ -## How to use Eigen in Paddle +# How to use Eigen in Paddle Essentially, a neural network is a compute graph. T data needed for the computation is stored in `Tensor`s and its computation procedure is described by `Operator`s. An `Operator` calls the `Compute` interface in its corresponding `OpKernel` and operates on the `Tensor`. -### Eigen Tensor Module +## Eigen Tensor Module The Eigen Tensor module supports powerful element-wise computation. In addition, a piece of code written using it can be run on both the CPU and the GPU. @@ -12,7 +12,7 @@ Note that Eigen Tensor is still being actively developed, so its tests are not c For details on Eigen Tensor module, please see [doc 1](https://github.com/RLovelett/eigen/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md) and [doc 2](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md). -### paddle::framework::Tensor +## paddle::framework::Tensor Paddle Tensor's is defined in the framework directory with the following interface: @@ -105,7 +105,7 @@ void Compute(const framework::ExecutionContext& context) const override { ``` -### paddle::framework::Tensor到EigenTensor的转换 +## paddle::framework::Tensor到EigenTensor的转换 As shown above, in actual computation, we need to transform the input and output `Tensor`s into formats Eigen supports. 
We show some functions in [eigen.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/eigen.h) to implement the transformation from `paddle::framework::Tensor` to `EigenTensor/EigenMatrix/EigenVector/EigenScalar`.
@@ -129,7 +129,7 @@ For more transformations, see the [unit tests](https://github.com/PaddlePaddle/P
-### Implementing Computation
+## Implementing Computation
While computing, the device interface is needed from the EigenTensors on the left hand side of the assignments. Note that the computation between EigenTensors only changes the data originally in the Tensor and does not change the shape information associated with the Tensor.
diff --git a/doc/fluid/getstarted/concepts/index_cn.rst b/doc/fluid/getstarted/concepts/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e7f70fc4cb871a80ffaffec6c06797973cd2f85 --- /dev/null +++ b/doc/fluid/getstarted/concepts/index_cn.rst
@@ -0,0 +1,4 @@
+基本使用概念
+============
+
+TBD
diff --git a/doc/fluid/getstarted/concepts/index_en.rst b/doc/fluid/getstarted/concepts/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..78cca1e2a3443c2949ca0655190b0f05502f519a --- /dev/null +++ b/doc/fluid/getstarted/concepts/index_en.rst
@@ -0,0 +1,4 @@
+Concepts
+============
+
+TBD
diff --git a/doc/fluid/getstarted/concepts/save_model/model_format.md b/doc/fluid/getstarted/concepts/save_model/model_format.md index e29129fddf775939c9f7a8b49d850d523e6e5a45..1f12ba0497369eacc6a2db7984781b5672f45ea1 100644 --- a/doc/fluid/getstarted/concepts/save_model/model_format.md +++ b/doc/fluid/getstarted/concepts/save_model/model_format.md
@@ -4,30 +4,70 @@
A model is an output of the training process. One complete model consists of two parts, the **topology** and the **parameters**. In order to support industrial deployment, the model format must be self-complete and must not expose any training source code.
-As a result, In PaddlePaddle, the **topology** is represented as a [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/1c0a4c901c9fc881d120249c703b15d1c50dae7d/doc/design/program.md), which describes the model structure. The **parameters** contain all the trainable weights in the model. We must support large size parameters and efficient serialization/deserialization of parameters.
+As a result, in PaddlePaddle, the **topology** is represented as a [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/1c0a4c901c9fc881d120249c703b15d1c50dae7d/doc/design/program.md), which describes the model structure. The **parameters** contain all the trainable weights in the model. We must support large-size parameters and efficient serialization/deserialization of parameters.
## Implementation
-The topology is saved as a plain text in a detailed self-contain protobuf file.
+The topology is saved as plain text in a detailed, self-contained protobuf file.
The parameters are saved as a binary file. As we all know, the protobuf message has a limit of [64M size](https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.coded_stream#CodedInputStream.SetTotalBytesLimit.details). We have done a [benchmark experiment](https://github.com/PaddlePaddle/Paddle/pull/4610), which shows that protobuf is not fit for the task.
-As a result, we design a particular format for tensor serialization.
By default, an arbitrary tensor in Paddle is a [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md), and has a description information proto of [LoDTensorDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L99). We save the DescProto as the byte string header. It contains all the necessary information, such as the `dims`, and the `LoD` information in [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/1c0a4c901c9fc881d120249c703b15d1c50dae7d/paddle/framework/lod_tensor.md). A tensor stores values in a continuous memory buffer. For speed we dump the raw memory to disk and save it as the byte string content. So, the binary format of one tensor is,
+As a result, we design a particular format for tensor serialization. By default, an arbitrary tensor in Paddle is a [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md), and has a description information proto of [LoDTensorDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L99). We save the DescProto as the byte string header. It contains all the necessary information, such as the `dims`, and the `LoD` information in [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/1c0a4c901c9fc881d120249c703b15d1c50dae7d/paddle/framework/lod_tensor.md). A tensor stores values in a continuous memory buffer. For speed we dump the raw memory to disk and save it as the byte string content. So, the binary format of one tensor is,
The table below shows a tensor's byte view in detail. Note that all the signed values are written in the little-endian format.
-|field name | type | description |
-| --- | --- | --- |
-| version | uint32_t | Version of saved file. Always 0 now. |
-| tensor desc length | uint32_t | TensorDesc(Protobuf message) length in bytes. |
-| tensor desc | void* | TensorDesc protobuf binary message |
-| tensor data | void* | Tensor's data in binary format. The length of `tensor_data` is decided by `TensorDesc.dims()` and `TensorDesc.data_type()` |
-| lod_level | uint64_t | Level of LoD |
-| length of lod[0] | uint64_t | [Optional] length of lod[0] in bytes. |
-| data of lod[0] | uint64_t* | [Optional] lod[0].data() |
-| ... | ... | ... |
+| field name | type | description |
+| --- | --- | --- |
+| version | uint32_t | Version of saved file. Always 0 now. |
+| tensor desc length | uint32_t | TensorDesc(Protobuf message) length in bytes. |
+| tensor desc | void* | TensorDesc protobuf binary message |
+| tensor data | void* | Tensor's data in binary format. The length of `tensor_data` is decided by `TensorDesc.dims()` and `TensorDesc.data_type()` |
+| lod_level | uint64_t | Level of LoD |
+| length of lod[0] | uint64_t | [Optional] length of lod[0] in bytes. |
+| data of lod[0] | uint64_t* | [Optional] lod[0].data() |
+| ... | ... | ... |
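To make this byte layout concrete, here is a minimal Python sketch (an illustration, not Paddle's actual loader) that reads one tensor saved in the field order of the table above. Decoding `TensorDesc` is stubbed out: the payload size really comes from the generated protobuf classes (`TensorDesc.dims()` and `TensorDesc.data_type()`), so it is passed in here as an explicit `data_size` argument.

```python
import struct

def read_lod_tensor(f, data_size):
    """Read one tensor saved in the format described by the table above.

    `f` is a binary file object positioned at the start of a tensor;
    `data_size` stands in for the length normally derived from TensorDesc.
    All integers are little-endian, as the table specifies.
    """
    version, = struct.unpack('<I', f.read(4))      # uint32_t, always 0 for now
    desc_len, = struct.unpack('<I', f.read(4))     # uint32_t, TensorDesc length
    desc_bytes = f.read(desc_len)                  # TensorDesc protobuf message
    data = f.read(data_size)                       # raw memory dump of values
    lod_level, = struct.unpack('<Q', f.read(8))    # uint64_t, level of LoD
    lod = []
    for _ in range(lod_level):                     # optional lod[i] sections
        nbytes, = struct.unpack('<Q', f.read(8))   # byte length of lod[i]
        lod.append(struct.unpack('<%dQ' % (nbytes // 8), f.read(nbytes)))
    return version, desc_bytes, data, lod
```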
## Summary diff --git a/doc/fluid/getstarted/index_cn.rst b/doc/fluid/getstarted/index_cn.rst index c4d8525f23ee18cb7f41ab2f0d148fc1dcc852b2..75af7354be93a6eeabfa9ccf86903505402a7ca6 100644 --- a/doc/fluid/getstarted/index_cn.rst +++ b/doc/fluid/getstarted/index_cn.rst @@ -1,4 +1,19 @@ 新手入门 ------------- +============ -新手入门 + +如果需要快速了解PaddlePaddle的使用,可以参考以下指南。 + +.. toctree:: + :maxdepth: 1 + + quickstart_cn.rst + + +在使用PaddlePaddle构建应用时,需要了解一些基本概念。 +这里以一个线性回归为例子,详细介绍了PaddlePaddle的使用流程,包括数据格式,模型配置与训练等。 + +.. toctree:: + :maxdepth: 1 + + concepts/use_concepts_cn.rst diff --git a/doc/fluid/getstarted/index_en.rst b/doc/fluid/getstarted/index_en.rst index a4efd05e2fd94ac0e2cbbc8603e6b0261b7e787f..75a43f4af87c34830ec940068196e6ca72640501 100644 --- a/doc/fluid/getstarted/index_en.rst +++ b/doc/fluid/getstarted/index_en.rst @@ -1,4 +1,18 @@ GET STARTED ------------- +============ -This is get started page +If you want to quickly know how to use PaddlePaddle, please refer to the following guide: + +.. toctree:: + :maxdepth: 1 + + quickstart_en.rst + +While using PaddlePaddle to build applications, please understand some basic concepts. + +Here is an example of linear regression. It introduces workflow of PaddlePaddle, including data format, model configuration and training, etc. + +.. toctree:: + :maxdepth: 1 + + concepts/index_en.rst diff --git a/doc/fluid/getstarted/quickstart_cn.rst b/doc/fluid/getstarted/quickstart_cn.rst new file mode 120000 index 0000000000000000000000000000000000000000..93a9e4e37a8495c553cec257c27363ca8d062d39 --- /dev/null +++ b/doc/fluid/getstarted/quickstart_cn.rst @@ -0,0 +1 @@ +../../v2/getstarted/quickstart_cn.rst \ No newline at end of file diff --git a/doc/fluid/getstarted/quickstart_en.rst b/doc/fluid/getstarted/quickstart_en.rst new file mode 120000 index 0000000000000000000000000000000000000000..6e1894faa1176bb9e77f616e07df36191e54b782 --- /dev/null +++ b/doc/fluid/getstarted/quickstart_en.rst @@ -0,0 +1 @@ +../../v2/getstarted/quickstart_en.rst \ No newline at end of file diff --git a/doc/fluid/howto/cluster/fluid_cluster_train_cn.md b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md index 1b6f767869aaa800c122c8e7a06a1413e48e10e0..b99b90056b0a2e51f2668a6d27d94857bdc09c37 100644 --- a/doc/fluid/howto/cluster/fluid_cluster_train_cn.md +++ b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md @@ -65,10 +65,10 @@ exit(1) **因此,在分布式的Fluid环境中,我们有两个角色需要创建,分别是Parameter Server和Trainer。** -### 分布式训练 +### 分布式训练 Fliud专门提供了工具[Distributed Transpiler](https://github.com/PaddlePaddle/Paddle/blob/ba65d54d9d3b41cd3c5171b00f476d4e60133ddb/doc/fluid/design/dist_train/distributed_architecture.md#distributed-transpiler)用于将单机版的训练程序转换为分布式版本的训练程序。工具背后的理念是找出程序的优化算子和梯度参数,将他们分隔为两部分,通过send/recv 操作算子进行连接,优化算子和梯度参数可以在优化器的minimize函数的返回值中获取到。 ```python -optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) +optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) ``` 将Distributed Transpiler、优化算子和梯度函数放在一个代码中如下: ```python @@ -99,15 +99,51 @@ for pass_id in range(100): ### 分布式训练脚本运行说明 分布式任务的运行需要将表格中说明的多个参数进行赋值: -| 参数名 | 值类型 | 说明 | 示例 | -|:-------------|:------|:---------------------------------------|:-------------| -| trainer_id | int | 当前训练节点的ID,训练节点ID编号为0 - n-1, n为trainers的值 | 0/1/2/3 | -| pservers | str | parameter server 列表 | 127.0.0.1:6710,127.0.0.1:6711 | -| trainers | int | 训练节点的总个数,>0的数字 | 4 | -| server_endpoint | str | 当前所起的服务节点的IP:PORT | 127.0.0.1:8789 | -| training_role | str | 节点角色, TRAINER/PSERVER | PSERVER | - -**注意:** 
```training_role```是用来区分当前所起服务的角色的,用于训练程序中,用户可根据需要自行定义,其他参数为fluid.DistributeTranspiler的transpile函数所需要,需要在调用函数前进行定义,样例如下:
+| 参数名 | 值类型 | 说明 | 示例 |
+| --- | --- | --- | --- |
+| trainer_id | int | 当前训练节点的ID,训练节点ID编号为0 - n-1, n为trainers的值 | 0/1/2/3 |
+| pservers | str | parameter server 列表 | 127.0.0.1:6710,127.0.0.1:6711 |
+| trainers | int | 训练节点的总个数,>0的数字 | 4 |
+| server_endpoint | str | 当前所起的服务节点的IP:PORT | 127.0.0.1:8789 |
+| training_role | str | 节点角色, TRAINER/PSERVER | PSERVER |
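As a rough end-to-end illustration of how the parameters in this table feed `fluid.DistributeTranspiler` (complementing the snippet below), here is a hypothetical sketch. The exact `transpile` signature and the program-getter methods may differ between Fluid releases, and it assumes the single-machine network and `Executor` have already been built as in the earlier sections.

```python
import os
import paddle.fluid as fluid

# Hypothetical wiring: read the table's parameters from the environment,
# then branch on training_role. Variable names are illustrative only.
trainer_id = int(os.getenv("TRAINER_ID", "0"))         # 0 .. trainers-1
pservers = os.getenv("PSERVERS", "127.0.0.1:6710,127.0.0.1:6711")
trainers = int(os.getenv("TRAINERS", "4"))
server_endpoint = os.getenv("SERVER_ENDPOINT", "127.0.0.1:8789")
training_role = os.getenv("TRAINING_ROLE", "TRAINER")  # TRAINER / PSERVER

exe = fluid.Executor(fluid.CPUPlace())

t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pservers, trainers=trainers)

if training_role == "PSERVER":
    # Parameter-server side: run the transpiled pserver program.
    pserver_prog = t.get_pserver_program(server_endpoint)
    exe.run(t.get_startup_program(server_endpoint, pserver_prog))
    exe.run(pserver_prog)
else:
    # Trainer side: this program is fed to exe.run() in the training loop.
    trainer_prog = t.get_trainer_program()
```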
+ + +**注意:** ```training_role```是用来区分当前所起服务的角色的,用于训练程序中,用户可根据需要自行定义,其他参数为fluid.DistributeTranspiler的transpile函数所需要,需要在调用函数前进行定义,样例如下: ```python t = fluid.DistributeTranspiler() diff --git a/doc/fluid/howto/index_cn.rst b/doc/fluid/howto/index_cn.rst index a92abad0c56a4fd821f9a6b9f4f5909504c8aaf1..97aeaf167d329529f2b120b5a3d4085e0510fe16 100644 --- a/doc/fluid/howto/index_cn.rst +++ b/doc/fluid/howto/index_cn.rst @@ -1,2 +1,7 @@ 进阶使用 ------------ + +.. toctree:: + :maxdepth: 1 + + optimization/index_cn.rst diff --git a/doc/fluid/howto/index_en.rst b/doc/fluid/howto/index_en.rst index 06036bdce554a96443ea1fa47c15f7670ea6089d..fd21e167ce3a46da167db1e9d7013804f730e047 100644 --- a/doc/fluid/howto/index_en.rst +++ b/doc/fluid/howto/index_en.rst @@ -1,4 +1,7 @@ HOW TO ------------ -This is how to page +.. toctree:: + :maxdepth: 1 + + optimization/index_en.rst diff --git a/doc/fluid/howto/optimization/benchmark/README.md b/doc/fluid/howto/optimization/benchmark/README.md new file mode 120000 index 0000000000000000000000000000000000000000..db30af7f53231c687f9ad61ad961a685733cbad0 --- /dev/null +++ b/doc/fluid/howto/optimization/benchmark/README.md @@ -0,0 +1 @@ +../../../../../benchmark/cluster/README.md \ No newline at end of file diff --git a/doc/fluid/howto/optimization/benchmark/index_cn.rst b/doc/fluid/howto/optimization/benchmark/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9404800eb86ca6d27886258b67393028c76954dc --- /dev/null +++ b/doc/fluid/howto/optimization/benchmark/index_cn.rst @@ -0,0 +1,8 @@ +基准 +------------ + +.. toctree:: + :maxdepth: 1 + + vgg16/README.md + README.md diff --git a/doc/fluid/howto/optimization/benchmark/index_en.rst b/doc/fluid/howto/optimization/benchmark/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..1e200b660cc7f6aeaf8b3d94fd7a14999a52bccd --- /dev/null +++ b/doc/fluid/howto/optimization/benchmark/index_en.rst @@ -0,0 +1,8 @@ +Benchmark +------------ + +.. toctree:: + :maxdepth: 1 + + vgg16/README.md + README.md diff --git a/doc/fluid/howto/optimization/benchmark/vgg16/README.md b/doc/fluid/howto/optimization/benchmark/vgg16/README.md new file mode 120000 index 0000000000000000000000000000000000000000..ca963ef5f06aa0c2fe507ba7548dca8017358120 --- /dev/null +++ b/doc/fluid/howto/optimization/benchmark/vgg16/README.md @@ -0,0 +1 @@ +../../../../../../benchmark/cluster/vgg16/README.md \ No newline at end of file diff --git a/doc/fluid/howto/optimization/cpu_profiling_cn.md b/doc/fluid/howto/optimization/cpu_profiling_cn.md index d59be670c2b33b64d9b6f96b53f50e5bf9f0613b..8266dec3c6125a09b90ac0ccd4aa5464f5c7db31 100644 --- a/doc/fluid/howto/optimization/cpu_profiling_cn.md +++ b/doc/fluid/howto/optimization/cpu_profiling_cn.md @@ -8,7 +8,7 @@ PaddlePaddle 用户一般通过调用 Python API 编写深度学习程序。大 * Python 与 C++ 混合代码的性能分析 -## Python代码的性能分析 +# Python代码的性能分析 ### 生成性能分析文件 @@ -42,14 +42,40 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py 每一列的含义是: -| 列名 | 含义 | -| --- | --- | -| ncalls | 函数的调用次数 | -| tottime | 函数实际使用的总时间。该时间去除掉本函数调用其他函数的时间 | -| percall | tottime的每次调用平均时间 | -| cumtime | 函数总时间。包含这个函数调用其他函数的时间 | -| percall | cumtime的每次调用平均时间 | -| filename:lineno(function) | 文件名, 行号,函数名 | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 列名 | 含义 |
+| --- | --- |
+| ncalls | 函数的调用次数 |
+| tottime | 函数实际使用的总时间。该时间去除掉本函数调用其他函数的时间 |
+| percall | tottime的每次调用平均时间 |
+| cumtime | 函数总时间。包含这个函数调用其他函数的时间 |
+| percall | cumtime的每次调用平均时间 |
+| filename:lineno(function) | 文件名, 行号,函数名 |
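For reference, a `profile.out` file like the one served by `cprofilev` above can be produced with Python's standard `cProfile` module alone; a minimal sketch follows, with a stand-in workload in place of the real training entry point:

```python
import cProfile

def main():
    # Stand-in workload; in practice this is the training script's entry point.
    sum(i * i for i in range(10 ** 6))

# Write profiling data to profile.out, the file that cprofilev reads above.
cProfile.run('main()', 'profile.out')
```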
### 寻找性能瓶颈 diff --git a/doc/fluid/howto/optimization/cpu_profiling_en.md b/doc/fluid/howto/optimization/cpu_profiling_en.md index 01e5fddf61547f9fc86ef18a6f2e2ac508d22dbb..e95556dd608b7ff0a3eb18873df0015a2da94e7c 100644 --- a/doc/fluid/howto/optimization/cpu_profiling_en.md +++ b/doc/fluid/howto/optimization/cpu_profiling_en.md @@ -14,7 +14,7 @@ the profiling and tuning of 1. the Python code and 1. the mixture of Python and C++ code. -## Profiling the Python Code +# Profiling the Python Code ### Generate the Performance Profiling File @@ -57,14 +57,40 @@ port, we will see the output like the following: where each line corresponds to Python function, and the meaning of each column is as follows: -| column | meaning | -| --- | --- | -| ncalls | the number of calls into a function | -| tottime | the total execution time of the function, not including the execution time of other functions called by the function | -| percall | tottime divided by ncalls | -| cumtime | the total execution time of the function, including the execution time of other functions being called | -| percall | cumtime divided by ncalls | -| filename:lineno(function) | where the function is defined | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| column | meaning |
+| --- | --- |
+| ncalls | the number of calls into a function |
+| tottime | the total execution time of the function, not including the execution time of other functions called by the function |
+| percall | tottime divided by ncalls |
+| cumtime | the total execution time of the function, including the execution time of other functions being called |
+| percall | cumtime divided by ncalls |
+| filename:lineno(function) | where the function is defined |
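The ranking described in the next section can also be done directly with the standard-library `pstats` module; a minimal sketch, assuming the profile was saved to `profile.out` as shown earlier:

```python
import pstats

stats = pstats.Stats('profile.out')

# Rank by tottime: the function's own time, as defined in the table above.
stats.strip_dirs().sort_stats('tottime').print_stats(5)

# Rank by cumtime instead to see which call trees dominate end-to-end time.
stats.sort_stats('cumtime').print_stats(5)
```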
### Identify Performance Bottlenecks @@ -81,7 +107,7 @@ focus on. We can sort above profiling file by tottime: We can see that the most time-consuming function is the `built-in method run`, which is a C++ function in `libpaddle.so`. We will -explain how to profile C++ code in the next section. At this +explain how to profile C++ code in the next section. At this moment, let's look into the third function `sync_with_cpp`, which is a Python function. We can click it to understand more about it: diff --git a/doc/fluid/howto/optimization/index_cn.rst b/doc/fluid/howto/optimization/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..27cc96702356703b339db845dc81913bdcc9f23b --- /dev/null +++ b/doc/fluid/howto/optimization/index_cn.rst @@ -0,0 +1,9 @@ +性能优化 +------------ + +.. toctree:: + :maxdepth: 1 + + timeline.md + cpu_profiling_cn.md + benchmark/index_cn.rst diff --git a/doc/fluid/howto/optimization/index_en.rst b/doc/fluid/howto/optimization/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..4ce624fe8f108a6afc7cd08a1542332755d22e04 --- /dev/null +++ b/doc/fluid/howto/optimization/index_en.rst @@ -0,0 +1,9 @@ +Performance Optimization +--------------------------- + +.. toctree:: + :maxdepth: 1 + + timeline.md + cpu_profiling_en.md + benchmark/index_en.rst diff --git a/doc/fluid/howto/optimization/timeline.md b/doc/fluid/howto/optimization/timeline.md index 9d9565a3e698a83ca465c5da83ff892360c33b8f..96481ae2a6e4442d40803f8d5361e5f942502df3 100644 --- a/doc/fluid/howto/optimization/timeline.md +++ b/doc/fluid/howto/optimization/timeline.md @@ -1,4 +1,4 @@ -## how to use timeline tool to do profile +# how to use timeline tool to do profile 1. Add `with profiler.profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. diff --git a/doc/fluid/howto/performance/profiler.md b/doc/fluid/howto/performance/profiler.md index b20b5efdc1f1f10ce7cec835adcc6fb374ed4e20..ee96e7c74ce317caddb387cbb1d4998937bd5c81 100644 --- a/doc/fluid/howto/performance/profiler.md +++ b/doc/fluid/howto/performance/profiler.md @@ -23,7 +23,7 @@ But how to record the time for the mixed C++ and CUDA program? There many C++ A The overall flow is shown as the following figure. -
+ *(figure: overall flow of the profiler; see doc/fluid/images/profiler.png, added below)*
### Event @@ -36,10 +36,10 @@ enum EventKind { kPopRange}; ``` - kMark: only a marker without time range. -- kPushRange: mark the starting event for time range. +- kPushRange: mark the starting event for time range. - kPopRange: mark the ending event for time range. -For the CPU code, the events only need to record the current time. For the CUDA code, the [event management functions of CUDA](http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html#group__CUDART__EVENT) are used. For many pieces of code, an event lists are used to record each piece. +For the CPU code, the events only need to record the current time. For the CUDA code, the [event management functions of CUDA](http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html#group__CUDART__EVENT) are used. For many pieces of code, an event lists are used to record each piece. ```c++ class Event { @@ -66,11 +66,11 @@ struct EventList { }; ``` -As mentioned above, there is no need to record the timeline when disabling the profiler. So there is a global state to enable or disable the profiler. +As mentioned above, there is no need to record the timeline when disabling the profiler. So there is a global state to enable or disable the profiler. ```c++ enum ProfilerState { - kDisabled, + kDisabled, kCPU, kCUDA }; diff --git a/doc/fluid/images/2_level_rnn.dot b/doc/fluid/images/2_level_rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..5d77865061ca7bbbfcf254dd938f09aef5553505 --- /dev/null +++ b/doc/fluid/images/2_level_rnn.dot @@ -0,0 +1,56 @@ +digraph G { + + rnn [label="1st level RNN" shape=box] + + subgraph cluster0 { + label = "time step 0" + + sent0 [label="sentence"] + sent1 [label="sentence"] + + rnn1 [label="2nd level RNN" shape=box] + + sent0 -> rnn1 + sent1 -> rnn1 + } + + subgraph cluster1 { + label = "time step 1" + + sent2 [label="sentence"] + sent3 [label="sentence"] + + rnn2 [label="2nd level RNN" shape=box] + + sent2 -> rnn2 + sent3 -> rnn2 + } + + subgraph cluster2 { + label = "time step 2" + + sent4 [label="sentence"] + sent5 [label="sentence"] + + rnn3 [label="2nd level RNN" shape=box] + + sent4 -> rnn3 + sent5 -> rnn3 + } + + + para0 [label="paragraph info 0"] + para1 [label="paragraph info 1"] + para2 [label="paragraph info 2"] + + rnn1 -> para0 + rnn2 -> para1 + rnn3 -> para2 + + para0 -> rnn + para1 -> rnn + para2 -> rnn + + chapter [label="chapter info"] + rnn -> chapter +} diff --git a/doc/fluid/images/2_level_rnn.png b/doc/fluid/images/2_level_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038 Binary files /dev/null and b/doc/fluid/images/2_level_rnn.png differ diff --git a/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg b/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0d90f7b9d8184b314b0ee4e521f53eb5f1b455 Binary files /dev/null and b/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg differ diff --git a/doc/fluid/images/asgd.gif b/doc/fluid/images/asgd.gif new file mode 100644 index 0000000000000000000000000000000000000000..4a0da7bf6df9326a2aab1638b77c5455c18b8c4e Binary files /dev/null and b/doc/fluid/images/asgd.gif differ diff --git a/doc/fluid/images/batch_norm_fork.dot b/doc/fluid/images/batch_norm_fork.dot new file mode 100644 index 0000000000000000000000000000000000000000..4bc47713cba2cb23f1b34fffe6426ef10ac3a9df --- /dev/null +++ b/doc/fluid/images/batch_norm_fork.dot @@ -0,0 +1,25 @@ 
+digraph ImageBatchNormForkGragh { + subgraph cluster_before { + Prev [label="...", shape=plaintext]; + Rnn [label="rnn_op", shape=box]; + BatchNorm [label="batch_norm_op", shape=box]; + Fc [label="fc_op", shape=box]; + After [label="...", shape=plaintext]; + Prev -> Rnn -> BatchNorm -> Fc -> After; + label="original"; + } + + subgraph cluster_after { + Prev2 [label="...", shape=plaintext]; + Rnn2 [label="rnn_op", shape=box]; + BatchNorm2_1 [label="train_batch_norm_op", shape=box]; + BatchNorm2_2 [label="infer_batch_norm_op", shape=box]; + Fc2_1 [label="fc_op", shape=box]; + Fc2_2 [label="fc_op", shape=box]; + After2_1 [label="...", shape=plaintext]; + After2_2 [label="...", shape=plaintext]; + Prev2 -> Rnn2 -> BatchNorm2_1 -> Fc2_1 -> After2_1; + Rnn2 -> BatchNorm2_2 ->Fc2_2 ->After2_2 + label="forked"; + } +} diff --git a/doc/fluid/images/batch_norm_fork.png b/doc/fluid/images/batch_norm_fork.png new file mode 100644 index 0000000000000000000000000000000000000000..aded62bce5bc268b7a3ef4dc96c89fe21d6ea955 Binary files /dev/null and b/doc/fluid/images/batch_norm_fork.png differ diff --git a/doc/fluid/images/batch_norm_op_kernel.png b/doc/fluid/images/batch_norm_op_kernel.png new file mode 100644 index 0000000000000000000000000000000000000000..a99ce81ff3bf42880ebbd6a1297de3bf038e09b2 Binary files /dev/null and b/doc/fluid/images/batch_norm_op_kernel.png differ diff --git a/doc/fluid/images/beam_search.png b/doc/fluid/images/beam_search.png new file mode 100644 index 0000000000000000000000000000000000000000..7f7e35f34223162d0f7f0ed97375909c43b830ae Binary files /dev/null and b/doc/fluid/images/beam_search.png differ diff --git a/doc/fluid/images/ci_build_whl.png b/doc/fluid/images/ci_build_whl.png new file mode 100644 index 0000000000000000000000000000000000000000..232762b82a9ae3e979a1f38a7beb715c87438f40 Binary files /dev/null and b/doc/fluid/images/ci_build_whl.png differ diff --git a/doc/fluid/images/compiler.graffle b/doc/fluid/images/compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..8cc678fea3c820103e7ce81f7a5d625d6c1d92de Binary files /dev/null and b/doc/fluid/images/compiler.graffle differ diff --git a/doc/fluid/images/compiler.png b/doc/fluid/images/compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..65d34f841afce9756def07dd8ecb9ca44e658bfe Binary files /dev/null and b/doc/fluid/images/compiler.png differ diff --git a/doc/fluid/images/control_flow_graph.png b/doc/fluid/images/control_flow_graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3579998e58d07abc50bd3332128d4733a391cb3b Binary files /dev/null and b/doc/fluid/images/control_flow_graph.png differ diff --git a/doc/fluid/images/dataflow_equations.png b/doc/fluid/images/dataflow_equations.png new file mode 100644 index 0000000000000000000000000000000000000000..c10f7f69f4007952e5b0394edaa04efa1cfbb658 Binary files /dev/null and b/doc/fluid/images/dataflow_equations.png differ diff --git a/doc/fluid/images/dcgan.png b/doc/fluid/images/dcgan.png new file mode 100644 index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28 Binary files /dev/null and b/doc/fluid/images/dcgan.png differ diff --git a/doc/fluid/images/deep_learning.png b/doc/fluid/images/deep_learning.png new file mode 100644 index 0000000000000000000000000000000000000000..026becc4d94e01e407dacb2a5314a0e5723334ff Binary files /dev/null and b/doc/fluid/images/deep_learning.png differ diff --git a/doc/fluid/images/dist-graph.graffle 
b/doc/fluid/images/dist-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b Binary files /dev/null and b/doc/fluid/images/dist-graph.graffle differ diff --git a/doc/fluid/images/dist-graph.png b/doc/fluid/images/dist-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7 Binary files /dev/null and b/doc/fluid/images/dist-graph.png differ diff --git a/doc/fluid/images/distributed_architecture.graffle b/doc/fluid/images/distributed_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d1b60141342232e06227c2d430ebc60ec349a907 Binary files /dev/null and b/doc/fluid/images/distributed_architecture.graffle differ diff --git a/doc/fluid/images/distributed_architecture.png b/doc/fluid/images/distributed_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..29c7b0c0783f97c6d33b1db1ed484d6a2b9dd356 Binary files /dev/null and b/doc/fluid/images/distributed_architecture.png differ diff --git a/doc/fluid/images/ds2_network.png b/doc/fluid/images/ds2_network.png new file mode 100644 index 0000000000000000000000000000000000000000..1a5b2184d47928cc2849d5a7c8ea2d8cf5337e11 Binary files /dev/null and b/doc/fluid/images/ds2_network.png differ diff --git a/doc/fluid/images/feed_forward.png b/doc/fluid/images/feed_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..d312371a04c26aa6cd196e0bd1f51becb425180b Binary files /dev/null and b/doc/fluid/images/feed_forward.png differ diff --git a/doc/fluid/images/feed_forward_regularized.png b/doc/fluid/images/feed_forward_regularized.png new file mode 100644 index 0000000000000000000000000000000000000000..677e99bfd9f8e72ed9fe4b27127af2ced202f447 Binary files /dev/null and b/doc/fluid/images/feed_forward_regularized.png differ diff --git a/doc/fluid/images/fluid-compiler.graffle b/doc/fluid/images/fluid-compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c933df2cb855462c52b2d25f7f9a99b95652961d Binary files /dev/null and b/doc/fluid/images/fluid-compiler.graffle differ diff --git a/doc/fluid/images/fluid-compiler.png b/doc/fluid/images/fluid-compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0ffed2039c91a3a00bbb719da08c91c3acf7bb Binary files /dev/null and b/doc/fluid/images/fluid-compiler.png differ diff --git a/doc/fluid/images/graph_construction_example.bash b/doc/fluid/images/graph_construction_example.bash new file mode 100755 index 0000000000000000000000000000000000000000..35e6997abd17588e17a82d448918fc1b3bd7220e --- /dev/null +++ b/doc/fluid/images/graph_construction_example.bash @@ -0,0 +1,11 @@ +cat ./graph_construction_example.dot | \ + sed 's/color=red/color=red, style=invis/g' | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_only.png + +cat ./graph_construction_example.dot | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_backward.png + +cat ./graph_construction_example.dot | \ + dot -Tpng > graph_construction_example_all.png diff --git a/doc/fluid/images/graph_construction_example.dot b/doc/fluid/images/graph_construction_example.dot new file mode 100644 index 0000000000000000000000000000000000000000..e115f9844bae6ad24f638c8ed4749cea8aff06a9 --- /dev/null +++ b/doc/fluid/images/graph_construction_example.dot @@ -0,0 +1,68 @@ +digraph 
ImageClassificationGraph { + ///////// The forward part ///////// + FeedX [label="Feed", color=blue, shape=box]; + FeedY [label="Feed", color=blue, shape=box]; + InitW [label="Init", color=blue, shape=diamond]; + Initb [label="Init", color=blue, shape=diamond]; + FC [label="FC", color=blue, shape=box]; + MSE [label="MSE", color=blue, shape=box]; + + x [label="x", color=blue, shape=oval]; + l [label="l", color=blue, shape=oval]; + y [label="y", color=blue, shape=oval]; + W [label="W", color=blue, shape=doublecircle]; + b [label="b", color=blue, shape=doublecircle]; + cost [label="cost", color=blue, shape=oval]; + + FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; + FeedY -> l [color=blue]; + InitW -> W [color=blue]; + Initb -> b [color=blue]; + W -> FC [color=blue]; + b -> FC [color=blue]; + l -> MSE [color=blue]; + + ////////// The backward part ///////// + MSE_Grad [label="MSE_grad", color=red, shape=box]; + FC_Grad [label="FC_grad", color=red, shape=box]; + + d_cost [label="d cost", color=red, shape=oval]; + d_y [label="d y", color=red, shape=oval]; + d_b [label="d b", color=red, shape=oval]; + d_W [label="d W", color=red, shape=oval]; + + cost -> MSE_Grad [color=red]; + d_cost -> MSE_Grad [color=red]; + l -> MSE_Grad [color=red]; + y -> MSE_Grad -> d_y [color=red]; + + x -> FC_Grad [color=red]; + y -> FC_Grad [color=red]; + d_y -> FC_Grad [color=red]; + W -> FC_Grad -> d_W [color=red]; + b -> FC_Grad -> d_b [color=red]; + + ////////// The optimizaiton part ////////// + + OPT_W [label="SGD", color=green, shape=box]; + OPT_b [label="SGD", color=green, shape=box]; + + W -> OPT_W [color=green]; + b -> OPT_b [color=green]; + d_W -> OPT_W -> W [color=green]; + d_b -> OPT_b -> b [color=green]; + + ////////// Groupings ////////// + + subgraph clusterMSE { + style=invis; + MSE; + MSE_Grad; + } + + subgraph clusterFC { + style=invis; + FC; + FC_Grad; + } +} diff --git a/doc/fluid/images/graph_construction_example_all.png b/doc/fluid/images/graph_construction_example_all.png new file mode 100644 index 0000000000000000000000000000000000000000..261611a5721f9aa97874f7e6d897fe48cf667db2 Binary files /dev/null and b/doc/fluid/images/graph_construction_example_all.png differ diff --git a/doc/fluid/images/graph_construction_example_forward_backward.png b/doc/fluid/images/graph_construction_example_forward_backward.png new file mode 100644 index 0000000000000000000000000000000000000000..4c69687f4a6a181138f3df72ce5e8aa48487b5be Binary files /dev/null and b/doc/fluid/images/graph_construction_example_forward_backward.png differ diff --git a/doc/fluid/images/graph_construction_example_forward_only.png b/doc/fluid/images/graph_construction_example_forward_only.png new file mode 100644 index 0000000000000000000000000000000000000000..e668c16e0cac73acb4e5dc2b1827557ae77126b4 Binary files /dev/null and b/doc/fluid/images/graph_construction_example_forward_only.png differ diff --git a/doc/fluid/images/l1_regularization.png b/doc/fluid/images/l1_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b9c7a44f94dc027598a98da93ddb8133190972 Binary files /dev/null and b/doc/fluid/images/l1_regularization.png differ diff --git a/doc/fluid/images/l2_regularization.png b/doc/fluid/images/l2_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c2fcbc2ccae75ad083162e5a2dceb0210be298 Binary files /dev/null and b/doc/fluid/images/l2_regularization.png differ diff --git a/doc/fluid/images/local-graph.graffle b/doc/fluid/images/local-graph.graffle new 
file mode 100644 index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5 Binary files /dev/null and b/doc/fluid/images/local-graph.graffle differ diff --git a/doc/fluid/images/local-graph.png b/doc/fluid/images/local-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7 Binary files /dev/null and b/doc/fluid/images/local-graph.png differ diff --git a/doc/fluid/images/local_architecture.graffle b/doc/fluid/images/local_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..49fcc663ebe3824aa234e3a67aadf285cb417877 Binary files /dev/null and b/doc/fluid/images/local_architecture.graffle differ diff --git a/doc/fluid/images/local_architecture.png b/doc/fluid/images/local_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..14adc9fd72b855bb9f74fbf2c84ac9ec0cf2b122 Binary files /dev/null and b/doc/fluid/images/local_architecture.png differ diff --git a/doc/fluid/images/lookup_table.png b/doc/fluid/images/lookup_table.png new file mode 100644 index 0000000000000000000000000000000000000000..72dfe3547f731d0d090338afb206b0549dff472e Binary files /dev/null and b/doc/fluid/images/lookup_table.png differ diff --git a/doc/fluid/images/lookup_table_training.png b/doc/fluid/images/lookup_table_training.png new file mode 100644 index 0000000000000000000000000000000000000000..cc7cc4aeb3b885850fe2f70f19fb84d5873bed1e Binary files /dev/null and b/doc/fluid/images/lookup_table_training.png differ diff --git a/doc/fluid/images/loss_equation.png b/doc/fluid/images/loss_equation.png new file mode 100644 index 0000000000000000000000000000000000000000..14212ec8d36c803de96bde8a9a4b5591bd20434e Binary files /dev/null and b/doc/fluid/images/loss_equation.png differ diff --git a/doc/fluid/images/multi-threads.graffle b/doc/fluid/images/multi-threads.graffle new file mode 100644 index 0000000000000000000000000000000000000000..e71173715fff92a0a933d0c7d83599ba948552c6 Binary files /dev/null and b/doc/fluid/images/multi-threads.graffle differ diff --git a/doc/fluid/images/multi-threads@3x.png b/doc/fluid/images/multi-threads@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..e40a869987dbbf5019d4cb03c1dab55b74d6c9f9 Binary files /dev/null and b/doc/fluid/images/multi-threads@3x.png differ diff --git a/doc/fluid/images/multigpu_allreduce.graffle b/doc/fluid/images/multigpu_allreduce.graffle new file mode 100644 index 0000000000000000000000000000000000000000..cb5bc420ceafe8ba4c87694d44ee4e5e4ad06779 Binary files /dev/null and b/doc/fluid/images/multigpu_allreduce.graffle differ diff --git a/doc/fluid/images/multigpu_allreduce.png b/doc/fluid/images/multigpu_allreduce.png new file mode 100644 index 0000000000000000000000000000000000000000..87a1b3e8f6dd4a713ec9df9f0037d1da04e9178a Binary files /dev/null and b/doc/fluid/images/multigpu_allreduce.png differ diff --git a/doc/fluid/images/multigpu_before_convert.graffle b/doc/fluid/images/multigpu_before_convert.graffle new file mode 100644 index 0000000000000000000000000000000000000000..6c35ab1b21fb76ceae82d3693ed0d085b5bc0855 Binary files /dev/null and b/doc/fluid/images/multigpu_before_convert.graffle differ diff --git a/doc/fluid/images/multigpu_before_convert.png b/doc/fluid/images/multigpu_before_convert.png new file mode 100644 index 0000000000000000000000000000000000000000..9c8f7711165d80a2fa3911280fdee91855a401b1 Binary files /dev/null and 
b/doc/fluid/images/multigpu_before_convert.png differ diff --git a/doc/fluid/images/multiple_reader.png b/doc/fluid/images/multiple_reader.png new file mode 100644 index 0000000000000000000000000000000000000000..b22126b31db4982c13fc3a0827805e6aaf955046 Binary files /dev/null and b/doc/fluid/images/multiple_reader.png differ diff --git a/doc/fluid/images/paddle-compile.graffle b/doc/fluid/images/paddle-compile.graffle new file mode 100644 index 0000000000000000000000000000000000000000..a6348cc3dbcaca923c6e794681b2edb85cb9f8f6 Binary files /dev/null and b/doc/fluid/images/paddle-compile.graffle differ diff --git a/doc/fluid/images/paddle-compile.png b/doc/fluid/images/paddle-compile.png new file mode 100644 index 0000000000000000000000000000000000000000..e0f13d551ac41afaec627a57dea79356464bf0bf Binary files /dev/null and b/doc/fluid/images/paddle-compile.png differ diff --git a/doc/fluid/images/pprof_1.png b/doc/fluid/images/pprof_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8e9edbf377672d0ef40f2fc7bd39e746923550cb Binary files /dev/null and b/doc/fluid/images/pprof_1.png differ diff --git a/doc/fluid/images/pprof_2.png b/doc/fluid/images/pprof_2.png new file mode 100644 index 0000000000000000000000000000000000000000..172ba20399ba974d27f4c072425277b69b02520b Binary files /dev/null and b/doc/fluid/images/pprof_2.png differ diff --git a/doc/fluid/images/profiler.png b/doc/fluid/images/profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..d57b71ca88aaba5d05584a6219d84214e285a1e1 Binary files /dev/null and b/doc/fluid/images/profiler.png differ diff --git a/doc/fluid/images/readers.png b/doc/fluid/images/readers.png new file mode 100644 index 0000000000000000000000000000000000000000..fd59168ce16c9e2a0ef45303c28c997cfd7740be Binary files /dev/null and b/doc/fluid/images/readers.png differ diff --git a/doc/fluid/images/remote_executor.graffle b/doc/fluid/images/remote_executor.graffle new file mode 100644 index 0000000000000000000000000000000000000000..41b2067311694b56d211a4f32d1b76884eeffd2d Binary files /dev/null and b/doc/fluid/images/remote_executor.graffle differ diff --git a/doc/fluid/images/remote_executor.png b/doc/fluid/images/remote_executor.png new file mode 100644 index 0000000000000000000000000000000000000000..744e2fb2e0f1bbe058e991ba7b2a09000965ee79 Binary files /dev/null and b/doc/fluid/images/remote_executor.png differ diff --git a/doc/fluid/images/rnn.dot b/doc/fluid/images/rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..c1141cd9c981bb3cbf50d8bf7a6ed210280d79a5 --- /dev/null +++ b/doc/fluid/images/rnn.dot @@ -0,0 +1,87 @@ +digraph G { + label = "simple RNN implementation" + + ranksep=2; + + //graph [nodesep=1, ranksep=1]; + + node[nodesep=1] + + subgraph cluster0 { + label = "global scope" + rankdir = TB + W + boot_memory + input + output + } + + subgraph cluster1 { + label = "step-scope 0" + rankdir = TB + memory0[label="memory"] + prememory0[label="pre-memory"] + step_input0[label="step input"] + step_output0[label="step output"] + } + + subgraph cluster2 { + label = "step-scope 1" + rankdir = TB + memory1[label="memory"] + prememory1[label="pre-memory"] + step_input1[label="step input"] + step_output1[label="step output"] + } + + subgraph cluster3 { + label = "step-scope 2" + rankdir = TB + memory2[label="memory"] + prememory2[label="pre-memory"] + step_input2[label="step input"] + step_output2[label="step output"] + } + + stepnet [shape=box] + stepnet0 [shape=box, style=dashed] + stepnet1 
[shape=box, style=dashed] + stepnet2 [shape=box, style=dashed] + + + edge[color=blue] + boot_memory -> prememory0 [label="init" color="blue"] + memory0 -> prememory1 [label="copy/reference" color="blue"] + memory1 -> prememory2 [label="copy/reference" color="blue"] + + edge[color=black] + W -> stepnet0[constraint=false, style=dashed] + W -> stepnet1[constraint=false, style=dashed] + W -> stepnet2[constraint=false, style=dashed] + + memory0 -> stepnet0[style=dashed] + prememory0 -> stepnet0 -> step_output0[style=dashed] + + memory1 -> stepnet1[style=dashed] + prememory1 -> stepnet1 -> step_output1[style=dashed] + + memory2 -> stepnet2[style=dashed] + prememory2 -> stepnet2 -> step_output2[style=dashed] + + input -> step_input0 + input -> step_input1 + input -> step_input2 + + step_input0 -> stepnet0 [style=dashed] + step_input1 -> stepnet1[style=dashed] + step_input2 -> stepnet2[style=dashed] + + step_output0 -> output + step_output1 -> output + step_output2 -> output + + stepnet0 -> stepnet[style=dashed] + stepnet1 -> stepnet[style=dashed] + stepnet2 -> stepnet[style=dashed] + +} diff --git a/doc/fluid/images/rnn.jpg b/doc/fluid/images/rnn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840 Binary files /dev/null and b/doc/fluid/images/rnn.jpg differ diff --git a/doc/fluid/images/rnn.png b/doc/fluid/images/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe Binary files /dev/null and b/doc/fluid/images/rnn.png differ diff --git a/doc/fluid/images/rnn_2level_data.dot b/doc/fluid/images/rnn_2level_data.dot new file mode 100644 index 0000000000000000000000000000000000000000..1d85ae2617a915ad0ad8288d848b607cc37ad297 --- /dev/null +++ b/doc/fluid/images/rnn_2level_data.dot @@ -0,0 +1,75 @@ +digraph G { + chapter [label="chapter"] + + subgraph cluster0 { + label = "paragraph 0" + + top_rnn0[label="top rnn step 0" shape=box] + + p0 [label="paragraph 0"] + p1 [label="paragraph 1"] + } + + subgraph cluster1{ + label = "paragraph 1" + + top_rnn1[label="top rnn step 1" shape=box] + + p2 [label="paragraph 0"] + p3 [label="paragraph 1"] + } + + subgraph cluster_p0 { + label = "sentence 0" + + low_rnn0 [label="low rnn step 0" shape=box] + s00 [label="sentence 0"] + s01 [label="sentence 1"] + + low_rnn0 -> s00 + low_rnn0 -> s01 + } + + subgraph cluster_p1 { + label = "sentence 1" + low_rnn1 [label="low rnn step 1" shape=box] + s10 [label="sentence 0"] + s11 [label="sentence 1"] + low_rnn1 -> s10 + low_rnn1 -> s11 + } + + subgraph cluster_p2 { + label = "sentence 1" + low_rnn2 [label="low rnn step 0" shape=box] + s20 [label="sentence 0"] + s21 [label="sentence 1"] + low_rnn2 -> s20 + low_rnn2 -> s21 + } + + subgraph cluster_p3 { + label = "sentence 1" + low_rnn3 [label="low rnn step 1" shape=box] + s30 [label="sentence 0"] + s31 [label="sentence 1"] + low_rnn3 -> s30 + low_rnn3 -> s31 + } + + + chapter -> top_rnn0 + chapter -> top_rnn1 + + top_rnn0 -> p0 + top_rnn0 -> p1 + top_rnn1 -> p2 + top_rnn1 -> p3 + + + p0 -> low_rnn0 + p1 -> low_rnn1 + p2 -> low_rnn2 + p3 -> low_rnn3 + +} diff --git a/doc/fluid/images/rnn_2level_data.png b/doc/fluid/images/rnn_2level_data.png new file mode 100644 index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6 Binary files /dev/null and b/doc/fluid/images/rnn_2level_data.png differ diff --git a/doc/fluid/images/single-thread@3x.png b/doc/fluid/images/single-thread@3x.png new file mode 100644 index 
0000000000000000000000000000000000000000..4083aebfdd45af5fbac25fa2c4176bc08c3cb44a Binary files /dev/null and b/doc/fluid/images/single-thread@3x.png differ diff --git a/doc/fluid/images/sparse_update.graffle b/doc/fluid/images/sparse_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..08d689a58f83698d8c1158ee3990ed8abf3a7a9a Binary files /dev/null and b/doc/fluid/images/sparse_update.graffle differ diff --git a/doc/fluid/images/sparse_update.png b/doc/fluid/images/sparse_update.png new file mode 100644 index 0000000000000000000000000000000000000000..8c872e6ac479f7d1b818a4a207956c43155d0ad7 Binary files /dev/null and b/doc/fluid/images/sparse_update.png differ diff --git a/doc/fluid/images/test.dot b/doc/fluid/images/test.dot new file mode 100644 index 0000000000000000000000000000000000000000..62c69b8fc8010a26a54a6ee8ef1488aad94d747a --- /dev/null +++ b/doc/fluid/images/test.dot @@ -0,0 +1,35 @@ + +digraph Test { + z -> generator -> G_img; + G_img -> discriminator -> D_f -> d_loss_f; + label0 -> d_loss_f -> d_loss; + + img -> discriminator -> D_t -> d_loss_t; + label1 -> d_loss_t -> d_loss; + + d_loss -> d_loss_t[color=red, style=dashed]; + d_loss -> d_loss_f[color=red, style=dashed]; + d_loss_t -> D_t[color=red, style=dashed]; + d_loss_f -> D_f[color=red, style=dashed]; + D_t -> discriminator[color=red, style=dashed]; + D_f -> discriminator[color=red, style=dashed]; + + D_f -> g_loss; + label2 -> g_loss; + + g_loss -> D_f[color=green, style=dashed]; + D_f -> discriminator[color=green, style=dashed]; + discriminator -> G_img[color=green, style=dashed]; + G_img -> generator[color=green, style=dashed]; + + discriminator [color=red, shape=box]; + generator [color=green, shape=box]; + z [shape=diamond]; + img [shape=diamond]; + label0 [shape=diamond]; + label1 [shape=diamond]; + label2 [shape=diamond]; + + d_loss [color=red]; + g_loss [color=green]; +} diff --git a/doc/fluid/images/test.dot.png b/doc/fluid/images/test.dot.png new file mode 100644 index 0000000000000000000000000000000000000000..4e121a40b9f7b2232d7cdda315bad15926446f55 Binary files /dev/null and b/doc/fluid/images/test.dot.png differ diff --git a/doc/fluid/images/theta_star.gif b/doc/fluid/images/theta_star.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd24d33e124396be3fc410c9b12f33148f64efe2 Binary files /dev/null and b/doc/fluid/images/theta_star.gif differ diff --git a/doc/fluid/images/timeline.jpeg b/doc/fluid/images/timeline.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..38ec3f80c982857531f30a8bb0fa26ea5bf05385 Binary files /dev/null and b/doc/fluid/images/timeline.jpeg differ diff --git a/doc/fluid/images/tracing.jpeg b/doc/fluid/images/tracing.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3a49fc4f8a401a9463b0157e2f38c164ca02dcc5 Binary files /dev/null and b/doc/fluid/images/tracing.jpeg differ diff --git a/doc/fluid/index_cn.rst b/doc/fluid/index_cn.rst index be3bed4393a7346d4f2a53e2c7409ee7165fb5b6..d878d192cae7ee9e8b8fdb4f615839c186fdf334 100644 --- a/doc/fluid/index_cn.rst +++ b/doc/fluid/index_cn.rst @@ -5,8 +5,8 @@ :maxdepth: 1 getstarted/index_cn.rst - design/index_cn.rst build_and_install/index_cn.rst + design/index_cn.rst howto/index_cn.rst dev/index_cn.rst faq/index_cn.rst diff --git a/doc/fluid/index_en.rst b/doc/fluid/index_en.rst index 87c831420a57b4b9ce77ecf44f7f4d0feec833a6..2bc76b58982cf50e637d15cca0c5d78166aa73a9 100644 --- a/doc/fluid/index_en.rst +++ b/doc/fluid/index_en.rst @@ -5,8 +5,8 @@ 
:maxdepth: 1 getstarted/index_en.rst - design/index_en.rst build_and_install/index_en.rst + design/index_en.rst howto/index_en.rst dev/index_en.rst faq/index_en.rst diff --git a/doc/mobile/CMakeLists.txt b/doc/mobile/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b104a6318d474d6531670b8ac3569448774850c7 --- /dev/null +++ b/doc/mobile/CMakeLists.txt @@ -0,0 +1,53 @@ +if(NOT DEFINED SPHINX_THEME) + set(SPHINX_THEME default) +endif() + +if(NOT DEFINED SPHINX_THEME_DIR) + set(SPHINX_THEME_DIR) +endif() + +# configured documentation tools and intermediate build results +set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") + +# HTML output director +set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in" + "${BINARY_BUILD_DIR_EN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_mobile_docs + html + ${BINARY_BUILD_DIR_EN} + ${SPHINX_CACHE_DIR_EN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_EN}) + +add_dependencies(paddle_mobile_docs gen_proto_py paddle_python) + +# configured documentation tools and intermediate build results +set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_doctrees") + +# HTML output director +set(SPHINX_HTML_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.cn.in" + "${BINARY_BUILD_DIR_CN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_mobile_docs_cn + html + ${BINARY_BUILD_DIR_CN} + ${SPHINX_CACHE_DIR_CN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_CN}) + +add_dependencies(paddle_mobile_docs_cn gen_proto_py paddle_python) diff --git a/doc/mobile/index_cn.rst b/doc/mobile/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..8297316e8fbb2b8f41954030293feadbcd81295e --- /dev/null +++ b/doc/mobile/index_cn.rst @@ -0,0 +1,9 @@ +移动端 +===== + +.. toctree:: + :maxdepth: 1 + + cross_compiling_for_android_cn.md + cross_compiling_for_ios_cn.md + cross_compiling_for_raspberry_cn.md \ No newline at end of file diff --git a/doc/mobile/index_en.rst b/doc/mobile/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0acdff0284e3bc84b2cc4a34a142ee01754f940 --- /dev/null +++ b/doc/mobile/index_en.rst @@ -0,0 +1,9 @@ +Mobile +====== + +.. toctree:: + :maxdepth: 1 + + cross_compiling_for_android_en.md + cross_compiling_for_ios_en.md + cross_compiling_for_raspberry_en.md diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 260b6c9fd1b364433cae098bacea77aa7fe9e266..76b82fd97f1ed642696c4414676b694ebda9ad81 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -13,7 +13,7 @@ # serve to show the default. import sys import os, subprocess -sys.path.insert(0, os.path.abspath('@PADDLE_SOURCE_DIR@/python')) +sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python')) import shlex from recommonmark import parser, transform import paddle diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index e5757b86b43001bc6090d8edd0aaa5ff4fc476ee..5aa5c1381fa3fad4ebc181c7868da03ae0138016 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -13,7 +13,7 @@ # serve to show the default. 
import sys import os, subprocess -sys.path.insert(0, os.path.abspath('@PADDLE_SOURCE_DIR@/python')) +sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python')) import shlex from recommonmark import parser, transform import paddle diff --git a/doc/v2/CMakeLists.txt b/doc/v2/CMakeLists.txt index 286fe8845cd7a909d4030540e72362864b536063..be957d37b14c618e9346251b3bd3dbaf1541773f 100644 --- a/doc/v2/CMakeLists.txt +++ b/doc/v2/CMakeLists.txt @@ -20,13 +20,15 @@ configure_file( "${BINARY_BUILD_DIR_EN}/conf.py" @ONLY) -sphinx_add_target(paddle_docs +sphinx_add_target(paddle_v2_docs html ${BINARY_BUILD_DIR_EN} ${SPHINX_CACHE_DIR_EN} ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) +add_dependencies(paddle_v2_docs gen_proto_py paddle_python) + # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") @@ -41,11 +43,13 @@ configure_file( "${BINARY_BUILD_DIR_CN}/conf.py" @ONLY) -sphinx_add_target(paddle_docs_cn +sphinx_add_target(paddle_v2_docs_cn html ${BINARY_BUILD_DIR_CN} ${SPHINX_CACHE_DIR_CN} ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) +add_dependencies(paddle_v2_docs_cn gen_proto_py paddle_python) + add_subdirectory(api) diff --git a/doc/v2/api/CMakeLists.txt b/doc/v2/api/CMakeLists.txt index 2ad589e8a260e48d46cba2300d6e2bcd4bdd8019..2670a21a227546ffcee4f10f395feef3c58df9b4 100644 --- a/doc/v2/api/CMakeLists.txt +++ b/doc/v2/api/CMakeLists.txt @@ -12,9 +12,11 @@ configure_file( "${BINARY_BUILD_DIR_EN}/conf.py" @ONLY) -sphinx_add_target(paddle_api_docs +sphinx_add_target(paddle_v2_apis html ${BINARY_BUILD_DIR_EN} ${SPHINX_CACHE_DIR_EN} ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) + +add_dependencies(paddle_v2_apis gen_proto_py framework_py_proto copy_paddle_pybind paddle_python) diff --git a/doc/v2/build_and_install/index_en.rst b/doc/v2/build_and_install/index_en.rst index 7e0ca5bcbdbad0a3c97c0045bb57b51137668161..5b3de0f8c3e5496060646b5ddb080d0d338a8bfa 100644 --- a/doc/v2/build_and_install/index_en.rst +++ b/doc/v2/build_and_install/index_en.rst @@ -1,32 +1,56 @@ -Install and Build -================= +install and Compile +========== .. _install_steps: -Install Steps -++++++++ +PaddlePaddle provides various methods of installation for many different users -You can choose either pip or Docker to complete your install: +Focus on Deep Learning Model Development +----------------- + +PaddlePaddle provides lots of packages of python wheel , that pip can install: .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - pip_install_en.rst - docker_install_en.rst + pip_install_en.rst -Build from Source ------------------ +This is the most convenient way of installation. Please choose the right installation package with machine configure and system. + +Follow the Bottom Frame +---------- + +PaddlePaddle also supports installation using Docker. Please refer to the tutorial below: + +.. toctree:: + :maxdepth: 1 + + docker_install_en.rst -.. warning:: +We recommend running PaddlePaddle in Docker. This method has the following advantages: - We recommend to directly install via above installation steps, you'll only need to build PaddlePaddle from source when you need a modifed binary. +- Does not require installation of third-party dependencies. +- Easy to share runtime environment. -.. toctree:: +Lastly, users can also compile and install PaddlePaddle from source code. The instructions are below: + +.. toctree:: :maxdepth: 1 - build_from_source_en.md + build_from_source_en.rst + +.. 
warning:: + + One caveat with this approach is that developers will have to download, compile and install all third-party dependencies. Thus this process of installation is more time consuming. + FAQ -++++++++++ +----------- + +For any problems during installation, please refer to the page below for answers: + +:ref:`常见问题解答 ` + +If the problem still persists, you are welcome to seek assistance from the PaddlePaddle community: -`FAQ `_ +`创建issue `_ diff --git a/doc/fluid/design/interface/00.why_plain_c.md b/doc/v2/design/interface/00.why_plain_c.md similarity index 100% rename from doc/fluid/design/interface/00.why_plain_c.md rename to doc/v2/design/interface/00.why_plain_c.md diff --git a/doc/fluid/design/interface/01.inference_implementation.md b/doc/v2/design/interface/01.inference_implementation.md similarity index 100% rename from doc/fluid/design/interface/01.inference_implementation.md rename to doc/v2/design/interface/01.inference_implementation.md diff --git a/doc/v2/design/interface/index_cn.rst b/doc/v2/design/interface/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2509a5c5f4182d8ce3a16a3b7bd92c0d7bf5b056 --- /dev/null +++ b/doc/v2/design/interface/index_cn.rst @@ -0,0 +1,7 @@ +多语言接口 +------------ + +.. toctree:: + :maxdepth: 1 + + 00.why_plain_c.md diff --git a/doc/v2/design/interface/index_en.rst b/doc/v2/design/interface/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..356e58c39c5ef6ee5ee50ab999b85f88628bfb85 --- /dev/null +++ b/doc/v2/design/interface/index_en.rst @@ -0,0 +1,7 @@ +Multilingual Interface +----------------------- + +.. toctree:: + :maxdepth: 1 + + 00.why_plain_c.md diff --git a/doc/v2/design/mkl/mkldnn.md b/doc/v2/design/mkl/mkldnn.md index e2fe1e6b26ffa73fda81863abfadf697c0acbfcf..1bd2e7bc34ee79eb753b3520d97e5e7beca89b0b 100644 --- a/doc/v2/design/mkl/mkldnn.md +++ b/doc/v2/design/mkl/mkldnn.md @@ -44,7 +44,7 @@ MKL,MKLML以及MKL-DNN三者关系如下表: | Name | Open Source | License | Descriptions | | :---------- | :--------------- | :---------- | :------------ | -| MKL | No | Proprietary | Accelerate math processing routines | +| MKL | No | Proprietary | Accelerate math processing routines | | MKLML | No | Proprietary | Small package of MKL, especially for Machine Learning | | MKL-DNN | Yes | Apache 2.0 | Accelerate primitives processing routines especially for Deep Neural Networks | @@ -89,7 +89,7 @@ PaddlePaddle/Paddle ### CMake 在`CMakeLists.txt`中提供一个与MKL有关的总开关:`WITH_MKL`,它负责决定编译时是否使用MKLML和MKL-DNN -- `WITH_MKLML` 控制是否使用MKLML库。 +- `WITH_MKLML` 控制是否使用MKLML库。 当打开`WITH_MKL`时,会自动使用MKLML库作为PaddlePaddle的CBLAS和LAPACK库,同时会开启Intel OpenMP用于提高MKLML的性能。 编译时会把对应的头文件和库放在`build/third_party/install/mklml/*`目录下对应的地方。 MKLML的库目前都是动态库,主要包括`libiomp5.so`和`libmklml_intel.so`。 @@ -172,7 +172,7 @@ if use_mkldnn self.layer_type = mkldnn_* ``` -所有MKL-DNN的`layer_type`会以*mkldnn_*开头,这些会在`MKLDNN*Layer`注册layer的时候保证,以示区分。 +所有MKL-DNN的`layer_type`会以*mkldnn_*开头,这些会在`MKLDNN*Layer`注册layer的时候保证,以示区分。 同时,会在`paddle/utils.Flags`中添加一个`use_mkldnn`的flag,用于选择是否使用MKL-DNN的相关功能。 diff --git a/doc/v2/dev/write_docs_cn.rst b/doc/v2/dev/write_docs_cn.rst index 23615f8830e99633676c83ec5d28139a732c623c..4231f2bb5cd800c0cd86835b5d07e491fcde4989 100644 --- a/doc/v2/dev/write_docs_cn.rst +++ b/doc/v2/dev/write_docs_cn.rst @@ -65,39 +65,55 @@ PaddlePaddle.org工具可以配合Docker使用,需要在系统里先安装好D 不使用PaddlePaddle.org工具 -------------------------- -使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即 
+使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。该方法与 `从源码编译PaddlePaddle `_ 相似,通过从源码中构建可用于编译PaddlePaddle文档的Docker镜像并运行,在进入Docker容器后使用源码中的脚本构建PaddlePaddle文档,具体步骤如下: -[TBD] +.. code-block:: bash + + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + + # 从源码中构建可用于编译PaddlePaddle文档的Docker镜像 + docker build -t paddle:dev . + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" -e "WITH_DOC=ON" paddle:dev /bin/bash + + # 进入Docker容器后使用build.sh脚本构建PaddlePaddle文档 + bash -x /paddle/paddle/scripts/docker/build.sh + +注:上述命令把当前目录(源码根目录)映射为 container 里的 :code:`/paddle` 目录。 + +编译完成后,会产生 ``doc/v2`` 和 ``doc/fluid`` 两个目录,在这两个目录下分别都生成 ``cn/html/`` 、 ``en/html`` 、 ``api/en/html`` 共三个子目录,分别进入这些目录下,执行以下命令: + +.. code-block:: bash + + python -m SimpleHTTPServer 8088 + +在浏览器中输入 http://localhost:8088 就可以看到编译生成的 ``v2`` 和 ``fluid`` 两种版本的中/英文的文档页面和英文的API页面。 如果不想使用Docker,也可以使用以下命令直接构建PaddlePaddle文档,即 .. code-block:: bash - mkdir paddle - cd paddle git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle mkdir -p build cd build cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON # 如果只需要构建使用文档,则执行以下命令 - make -j $processors gen_proto_py - make -j $processors paddle_docs paddle_docs_cn + make -j $processors paddle_docs # 如果只需要构建API,则执行以下命令 - make -j $processors gen_proto_py framework_py_proto - make -j $processors copy_paddle_pybind - make -j $processors paddle_api_docs + make -j $processors paddle_apis 其中$processors代表启动和CPU核一样多的进程来并行编译,可以根据本机的CPU核数设置相应的值。 -编译完成后,进入 ``doc/v2`` 目录,如果选择构建文档则会在该目录下生成 ``cn/html/`` 、 ``en/html`` 两个子目录,选择构建API则会生成 ``api/en/html`` 目录,分别进入这些目录下,执行以下命令: +编译完成后,同样会产生 ``doc/v2`` 和 ``doc/fluid`` 两个目录,如果选择构建文档则会在这两个目录下分别都生成 ``cn/html/`` 、 ``en/html`` 两个子目录,选择构建API则会在这两个目录下分别生成 ``api/en/html`` 目录,分别进入这些子目录下,执行以下命令: .. code-block:: bash python -m SimpleHTTPServer 8088 -在浏览器中输入 http://localhost:8088 就可以看到编译生成的中/英文的文档页面和英文的API页面,下图为生成的英文文档首页示例。注意,示例中由于使用了sphinx的原始主题,所以页面的风格与官网并不一致,但这并不影响开发者进行调试。 +在浏览器中输入 http://localhost:8088 就可以看到编译生成的 ``v2`` 和 ``fluid`` 两种版本的中/英文的文档页面和英文的API页面。下图为生成的 ``v2`` 英文文档首页示例。注意,示例中由于使用了sphinx的原始主题,所以页面的风格与官网并不一致,但这并不影响开发者进行调试。 .. image:: src/doc_en.png :align: center diff --git a/doc/v2/dev/write_docs_en.rst b/doc/v2/dev/write_docs_en.rst index 15ff0d34ad622f100fe98d8738b830e47c35b41b..6105455e202e4704aa25f0fd9916b9b61a569702 100644 --- a/doc/v2/dev/write_docs_en.rst +++ b/doc/v2/dev/write_docs_en.rst @@ -68,39 +68,56 @@ Please `click here `_ on how to install Docker. After Docker is installed, you could use the scripts in the source directory to build the documentation. +Build PaddlePaddle's documentation with Docker,you need to install Docker first. Please refer to `Docker's official website `_ on how to install Docker. This method is quite similar to ` Build From Sources `_ , by constructing, from source code, a docker image that can be used to build PaddlePaddle documentation. Enter the Docker container and use the script ``build.sh`` in the source directory to build the PaddlePaddle documentation. The specific steps are as follows: -[TBD] +.. code-block:: bash + + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + + # Construct a docker image from source code + docker build -t paddle:dev . 
+    docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" -e "WITH_DOC=ON" paddle:dev /bin/bash
+
+    # Use build.sh to build the PaddlePaddle documentation
+    bash -x /paddle/paddle/scripts/docker/build.sh
+
+Note: The above commands map the current directory (the source root directory) to the :code:`/paddle` directory in the container.
+
+After compiling, two directories are generated, ``doc/v2`` and ``doc/fluid``, and each of them contains the three subdirectories ``cn/html/``, ``en/html`` and ``api/en/html``. Enter these directories and execute the following command:
+
+.. code-block:: bash
+
+    python -m SimpleHTTPServer 8088
+
+Use a web browser and navigate to http://localhost:8088; you can see the compiled Chinese/English documentation pages and the English API pages of both the ``v2`` and ``fluid`` versions.

If you do not wish to use Docker, you can also use the following commands to directly build the PaddlePaddle documentation.

.. code-block:: bash

-   mkdir paddle
-   cd paddle
+   git clone https://github.com/PaddlePaddle/Paddle.git
+   cd Paddle
    mkdir -p build
    cd build
    cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON

    # If you only need to build documents, use the following commands
-   make -j $processors gen_proto_py
-   make -j $processors paddle_docs paddle_docs_cn
+   make -j $processors paddle_docs

    # If you only need to build APIs, use the following commands
-   make -j $processors gen_proto_py framework_py_proto
-   make -j $processors copy_paddle_pybind
-   make -j $processors paddle_api_docs
+   make -j $processors paddle_apis

$processors indicates that as many processes as there are CPU cores are started to compile in parallel; set it according to the number of CPU cores of your machine.

-After the compilation is complete, enter the ``doc/v2`` directory. If you chose to build documents, it will generate ``cn/html/`` and ``en/html`` subdirectories under this directory. If you chose to build APIs,it will generate``api/en/html`` subdirectory. Please enter these directories respectively and execute the following commands:
+After compiling, the same two directories ``doc/v2`` and ``doc/fluid`` are generated. If you chose to build documents, the subdirectories ``cn/html/`` and ``en/html`` will be generated in both directories. If you chose to build APIs, a subdirectory ``api/en/html`` will be generated in each. Enter these directories and execute the following command:

.. code-block:: bash

   python -m SimpleHTTPServer 8088

-Use a web browser and navigate to http://localhost:8000, you could see the compiled Chinese/English documents page and the English APIs page. The following figure is an example of the built English documents home page. Note that due to the sphinx's original theme used in the example, the style of the page is not consistent with the official website, but this does not affect the developer's debugging.
+Use a web browser and navigate to http://localhost:8088; you can see the compiled Chinese/English documentation pages and English API pages of both the ``v2`` and ``fluid`` versions. The following figure is an example of the built ``v2`` English documentation home page. Note that because the example uses sphinx's original theme, the style of the page is not consistent with the official website, but this does not affect the developer's debugging.

..
image:: src/doc_en.png :align: center diff --git a/doc/v2/faq/build_and_install/index_cn.rst b/doc/v2/faq/build_and_install/index_cn.rst index 7c7e896d187e4fe1544d7ec933fa4fa9f24df3cd..f292684fb5fe2df06db5239e7f43fdfa1dd2f2bd 100644 --- a/doc/v2/faq/build_and_install/index_cn.rst +++ b/doc/v2/faq/build_and_install/index_cn.rst @@ -139,3 +139,77 @@ PaddlePaddle使用avx SIMD指令提高cpu执行效率,因此错误的使用二 touch ../extern_mklml-stamp/extern_mklml-download // 4. 接着编译即可 + +9. 在Mac上无法安装numpy等Python包,权限错误 +------------------ + +Mac上对自带的Python和包有严格的权限保护,最好不要在自带的Python上安装。建议用virtualenv建立一个新的Python环境来操作。 + +virtualenv的基本原理是将机器上的Python运行所需的运行环境完整地拷贝一份。我们可以在一台机器上制造多份拷贝,并在这多个拷贝之间自由切换,这样就相当于在一台机器上拥有了多个相互隔离、互不干扰的Python环境。 + +下面简单介绍下如何用virtualenv为Paddle生成一个专用的Python环境: + +安装virtualenv: +:::::::::::::::: + +virtualenv本身也是Python的一个包,可以用pip进行安装: + +.. code-block:: bash + + sudo -H pip install virtualenv + +由于virtualenv需要安装给系统自带的Python,因此需要使用sudo权限。 + +创建一个新的Python运行环境: +::::::::::::::::::: + +.. code-block:: bash + + virtualenv --no-site-packages paddle + +--no-site-packages 参数表示不拷贝已有的任何第三方包,创造一个完全干净的新Python环境。后面的paddle是我们为这个新创建的环境取的名字。 + +执行完这一步后,当前目录下应该会出现一个名为paddle(或者你取的其他名字)的目录。这个目录里保存了运行一个Python环境所需要的各种文件。 + +启动运行环境: +:::::::::::::::: + +.. code-block:: bash + + source paddle/bin/activate + +执行后会发现命令提示符前面增加了(paddle)字样,说明已经成功启动了名为‘paddle’的Python环境。执行which python,可以发现使用的已经是刚刚创建的paddle目录下的Python。 + +在这个环境中,我们可以自由地进行Paddle的安装、使用和开发工作,无需担心对系统自带Python的影响。 + +退出运行环境: +::::::::::::::: + +直接执行: + +.. code-block:: bash + + deactivate + +可以看到命令提示符前面的(paddle)字样消失。 + +自动启动某一Python环境: +:::::::::::::::: + +如果我们经常使用Paddle,我们每次打开终端后都需要执行一下source paddle/bin/activate来启动环境,比较繁琐。为了简便,可以修改终端的配置文件,来让终端每次启动后自动启动特定的Python环境。 + +执行: + +.. code-block:: bash + + vi ~/.bash_profile + +打开终端配置文件,并在文件的最后添加一行: + +.. code-block:: bash + + source paddle/bin/activate + +保存并关闭文件。 + +这样,每次打开终端时就会自动启动名为‘paddle’的Python环境了。 diff --git a/doc/v2/faq/build_and_install/index_en.rst b/doc/v2/faq/build_and_install/index_en.rst index 614db457d715665073cec1a495d4d7df6887532f..7488ed8137d57785f36b9f1e1ed1269f864960bc 100644 --- a/doc/v2/faq/build_and_install/index_en.rst +++ b/doc/v2/faq/build_and_install/index_en.rst @@ -1,5 +1,143 @@ -############################ -Install, Build and Unit test -############################ +.. _install_faq: -TBD +############################### +Compile, Install, and Unit Test +############################### + +.. contents:: + +1. Insufficient CUDA driver version +---------------------------------------------------------------- + +Many users usually face issues like `Cuda Error: CUDA driver version is insufficient for CUDA runtime version` when running the PaddlePaddle GPU Docker image. The cause is that you may not map the local CUDA driver to a container directory. +You can solve the issue by running the following commands: + +.. code-block:: bash + + $ export CUDA_SO="$(\ls usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + $ docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu + +For more infomation about Docker's installation and usage, please refer to `PaddlePaddle Docker documentation `_ . + + +2. Version mismatch between PythonLibs and PythonInterpreter +---------------------------------------------------------------- + +It is a common bug when CMake looks up Python. 
If you install multiple versions of Python, CMake may find mismatched versions of PythonLibs and PythonInterpreter. In that case you have to specify a Python version explicitly, as follows (the three placeholders were lost in the original and are reconstructed here):
+
+   .. code-block:: bash
+
+      cmake .. -DPYTHON_EXECUTABLE=<exc_path> -DPYTHON_LIBRARY=<lib_path> -DPYTHON_INCLUDE_DIR=<inc_path>
+
+You should replace ``<exc_path>``, ``<lib_path>`` and ``<inc_path>`` with your local paths.
+
+3. PaddlePaddle version is 0.0.0
+------------------------------------------------
+This issue may occur when you run :code:`paddle version` or :code:`cmake ..`:
+
+.. code-block:: bash
+
+   CMake Warning at cmake/version.cmake:20 (message):
+     Cannot add paddle version from git tag
+
+You should pull all remote branches to your local machine with the command :code:`git fetch upstream` and then run :code:`cmake ..` again.
+
+4. paddlepaddle\*.whl is not a supported wheel on this platform.
+------------------------------------------------------------------------
+
+The primary cause of this issue is that pip cannot find a PaddlePaddle installation package that matches your current system. The latest PaddlePaddle Python installation package supports the Linux x86_64 and MacOS 10.12 operating systems, with Python 2.7 and pip 9.0.1.
+
+You can upgrade pip with the following command:
+
+.. code-block:: bash
+
+   pip install --upgrade pip
+
+If that does not work for you, you can run the command :code:`python -c "import pip; print(pip.pep425tags.get_supported())"` to get the package suffixes your system supports, and then compare them with the suffix of your installation package.
+
+If the system supports :code:`linux_x86_64` and the installation package is :code:`manylinux1_x86_64`, you should upgrade pip to the latest version;
+
+if the system supports :code:`manylinux1_x86_64` and the local installation package is :code:`linux_x86_64`, you can rename the whl package to :code:`manylinux1_x86_64` and then try again.
+
+
+5. ImportError: No module named v2
+----------------------------------
+Please uninstall Paddle V1 if you have installed it before:
+
+.. code-block:: bash
+
+   pip uninstall py_paddle paddle
+
+Then install the PaddlePaddle Python packages: enter the build directory and run the following commands.
+
+.. code-block:: bash
+
+   pip install python/dist/paddle*.whl && pip install ../paddle/dist/py_paddle*.whl
+
+6. Illegal instruction
+-----------------------
+This issue is usually caused by running a PaddlePaddle binary that uses AVX SIMD instructions (which PaddlePaddle uses to increase CPU performance) on a CPU that does not support them. Please choose the version that matches your CPU.
+
+7. Python unittest fails
+--------------------------------
+
+If the following python unittest testcases fail:
+
+.. code-block:: bash
+
+   24 - test_PyDataProvider (Failed)
+   26 - test_RecurrentGradientMachine (Failed)
+   27 - test_NetworkCompare (Failed)
+   28 - test_PyDataProvider2 (Failed)
+   32 - test_Prediction (Failed)
+   33 - test_Compare (Failed)
+   34 - test_Trainer (Failed)
+   35 - test_TrainerOnePass (Failed)
+   36 - test_CompareTwoNets (Failed)
+   37 - test_CompareTwoOpts (Failed)
+   38 - test_CompareSparse (Failed)
+   39 - test_recurrent_machine_generation (Failed)
+   40 - test_PyDataProviderWrapper (Failed)
+   41 - test_config_parser (Failed)
+   42 - test_swig_api (Failed)
+   43 - layers_test (Failed)
+
+Please check the PaddlePaddle unittest logs, which may suggest the following:
+
+.. code-block:: bash
+
+   paddle package is already in your PYTHONPATH. But unittest need a clean environment.
+   Please uninstall paddle package before start unittest. Try to 'pip uninstall paddle'.
+
+The solution is:
+
+* Remove the old PaddlePaddle to make a clean environment for the unit tests. If the PaddlePaddle package is already in Python's site-packages, the unit tests would refer to the Python package in site-packages instead of the Python package in the :code:`python` directory of the source tree. Setting :code:`PYTHONPATH` to the source tree's :code:`python` directory is also useless, because Python's search path gives priority to the installed Python package.
+
+
+8. Failed to download the MKLML library
+----------------------------------------------
+
+.. code-block:: bash
+
+   make[2]: *** [third_party/mklml/src/extern_mklml-stamp/extern_mklml-download] error 4
+   make[1]: *** [CMakeFiles/extern_mklml.dir/all] error 2
+   make[1]: *** waiting for the unfinished jobs....
+
+Cause: a slow network or SSL-link problems can cause the MKLML library download to fail.
+
+The solution is to download and install it manually. The specific steps are as follows:
+
+.. code-block:: bash
+
+   // 1. enter the directory
+   cd build/third_party/mklml/src/extern_mklml
+
+   // 2. check the size of the package; it is normally 75M, and if it is less than 75M the download failed
+   du -sh mklml_lnx_2018.0.1.20171007.tgz
+
+   // 3. manually download, unzip, and create the download-success tag:
+   wget --no-check-certificate https://github.com/01org/mkl-dnn/releases/download/v0.11/mklml_lnx_2018.0.1.20171007.tgz -c -O mklml_lnx_2018.0.1.20171007.tgz
+   tar zxf mklml_lnx_2018.0.1.20171007.tgz
+   touch ../extern_mklml-stamp/extern_mklml-download
+
+   // 4. then compile
+
diff --git a/doc/v2/faq/cluster/index_en.rst b/doc/v2/faq/cluster/index_en.rst
index 855b7e8e53307b82a72c156be4ef509e27edf822..fa942a09625bef78b28456beeb735272b686e061 100644
--- a/doc/v2/faq/cluster/index_en.rst
+++ b/doc/v2/faq/cluster/index_en.rst
@@ -2,4 +2,15 @@ Cluster Training and Prediction
 ###############################
 
-TBD
+.. contents::
+
+1. Network connection errors in the log during multi-node cluster training
+--------------------------------------------------------------------------
+You may see errors in the log that belong to network connection problems during multi-node cluster training, for example :code:`Connection reset by peer`.
+This kind of error is usually caused by the abnormal exit of a training process on one of the nodes, after which the other nodes can no longer connect to it. Steps to troubleshoot the problem are as follows:
+
+* Find the first error in :code:`train.log` and :code:`server.log`, and check whether some other fault caused the problem, such as a floating-point exception, or running out of memory or disk space.
+
+* If the first error in server.log says "Address already used", this may be caused by a port conflict in non-exclusive execution. Contact the system administrator to check whether the current MPI cluster supports jobs submitted with the parameter :code:`resource=full`. If the current MPI cluster does not support this parameter, change the server port and try again.
+
+* If the current MPI cluster does not support the exclusive pattern, which allows a process to occupy the whole node, ask the administrator to replace or upgrade the cluster.
diff --git a/doc/v2/faq/model/index_en.rst b/doc/v2/faq/model/index_en.rst
index cb26f59655f97dc28a2047994643ae16b8857964..67a33e08e192e5627ac3b0abd76e979f21ed2079 100644
--- a/doc/v2/faq/model/index_en.rst
+++ b/doc/v2/faq/model/index_en.rst
@@ -2,4 +2,80 @@ Model Configuration
 ###################
 
-TBD
+.. contents::
+
+1. How to deal with the error :code:`Duplicated layer name`
+----------------------------------------------------------
+
+The general reason for this error is that users may have set the same value for the attribute :code:`name` in different layers. Try to find the :code:`name` attributes with the same value in different layers and set them differently.
+
+2. How to use :code:`paddle.layer.memory`'s attribute :code:`name`
+----------------------------------------------------------------------
+
+* :code:`paddle.layer.memory` is used to get the output of a layer's last timestep, and the layer is specified by the attribute :code:`name`. Thus, :code:`paddle.layer.memory` is associated with the layer that has the same value of the attribute :code:`name`, and uses the output of that layer's last timestep as the input of its current timestep.
+
+* All PaddlePaddle layers have a unique name, which is set by the attribute :code:`name`. PaddlePaddle sets it automatically for the user when it is not explicitly set. :code:`paddle.layer.memory` is not a real layer; its own name is set by the attribute :code:`memory_name`, which PaddlePaddle also sets automatically when the user does not. The :code:`paddle.layer.memory` attribute :code:`name` is used to specify the layer it is associated with, and needs to be explicitly set by the user.
+
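+As a minimal sketch of this association (the layer name ``rnn_out`` and the sizes are hypothetical, chosen just for this example), inside a :code:`recurrent_group` step function:
+
+.. code-block:: python
+
+    import paddle.v2 as paddle
+
+    hidden_dim = 128  # hypothetical size
+
+    def step(x):
+        # reads the previous-timestep output of the layer named 'rnn_out'
+        prev = paddle.layer.memory(name='rnn_out', size=hidden_dim)
+        # this fc layer carries the same name, which closes the recurrent loop
+        return paddle.layer.fc(input=[x, prev],
+                               size=hidden_dim,
+                               act=paddle.activation.Tanh(),
+                               name='rnn_out')
+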
+3. What is the difference between the two ways of using dropout
+-----------------------------------------------------------------
+
+* There are two ways to use dropout in PaddlePaddle:
+
+  * Set the :code:`drop_rate` parameter in the layer's :code:`layer_attr` attribute. Take :code:`paddle.layer.fc` as an example:
+
+  .. code-block:: python
+
+     fc = paddle.layer.fc(input=input, layer_attr=paddle.attr.ExtraLayerAttribute(drop_rate=0.5))
+
+  * Use the :code:`paddle.layer.dropout` layer. Take :code:`paddle.layer.fc` as an example:
+
+  .. code-block:: python
+
+     fc = paddle.layer.fc(input=input)
+     drop_fc = paddle.layer.dropout(input=fc, dropout_rate=0.5)
+
+* :code:`paddle.layer.dropout` actually uses the :code:`paddle.layer.add_to` layer and sets :code:`drop_rate` in the same way as the previous method. This method is very memory intensive.
+
+* PaddlePaddle implements dropout in the activation function rather than in the layer.
+
+* :code:`paddle.layer.lstmemory`, :code:`paddle.layer.grumemory` and :code:`paddle.layer.recurrent` implement the activation of their output in an unusual way, so we cannot use dropout on them by setting :code:`drop_rate`. To use dropout for these layers, we could use the second method, that is, :code:`paddle.layer.dropout`.
+
+4. The differences between different recurrent layers
+--------------------------------------------------------
+Take LSTM as an example. There are several kinds of recurrent layers in PaddlePaddle:
+
+* :code:`paddle.layer.lstmemory`
+* :code:`paddle.networks.simple_lstm`
+* :code:`paddle.networks.lstmemory_group`
+* :code:`paddle.networks.bidirectional_lstm`
+
+According to the implementation, recurrent layers can be classified into 2 types:
+
+1. Recurrent layers implemented by recurrent_group:
+
+  * Using this type of recurrent layer, users can access the intermediate values calculated by the recurrent unit within a timestep (e.g. hidden states, memory cells, etc.).
+  * :code:`paddle.networks.lstmemory_group` belongs to this type of recurrent layer.
+
+2. Recurrent layers implemented as a complete operation:
+
+  * Users can only access the output values when using this type of recurrent layer.
+  * :code:`paddle.layer.lstmemory`, :code:`paddle.networks.simple_lstm` and :code:`paddle.networks.bidirectional_lstm` belong to this type of recurrent layer.
+
+By implementing the recurrent layer as a complete operation, CPU and GPU calculations can be optimized. Therefore, the second type of recurrent layer is more efficient than the first one. In practical applications, we recommend using the second type of recurrent layer if there is no need to access the intermediate variables of the LSTM.
+
+In addition, PaddlePaddle also contains a kind of LSTM calculation unit: :code:`paddle.networks.lstmemory_unit`:
+
+  * Unlike the recurrent layers described above, :code:`paddle.networks.lstmemory_unit` defines the computational process of an LSTM unit within one timestep. It is not a complete recurrent layer, nor can it receive sequence data as input.
+  * :code:`paddle.networks.lstmemory_unit` can only be used as a step function in recurrent_group.
+
+5. Can Softmax's calculation dimension be specified?
+--------------------------------------------------------------------
+
+We cannot specify the calculation dimension for PaddlePaddle's softmax; it can only be calculated by rows.
+In image tasks, for NCHW data, if you need to calculate softmax in the C dimension, you could use :code:`paddle.layer.switch_order` to change the dimension order, that is, convert NCHW to NHWC, then do a reshape operation and calculate softmax.
+
+6. Does PaddlePaddle support variable-dimensional data inputs?
+----------------------------------------------------------------
+
+PaddlePaddle provides :code:`paddle.data_type.dense_array` to support variable-dimensional data input. Simply set the dimension of the data layer to a value larger than the dimension of the input data; the value only serves as a placeholder.
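+As a rough illustration of item 6 (the layer name and the placeholder size are made up, and :code:`dense_array` is assumed to take the dimension the same way :code:`dense_vector` does):
+
+.. code-block:: python
+
+    import paddle.v2 as paddle
+
+    # declare a dimension larger than any input we expect;
+    # it only serves as a placeholder for variable-dimensional inputs
+    x = paddle.layer.data(name='x', type=paddle.data_type.dense_array(2048))
+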
diff --git a/doc/v2/howto/capi/compile_paddle_lib_en.md b/doc/v2/howto/capi/compile_paddle_lib_en.md
index 11d69b9b79c1a41898d3060d3fe25a31330334a3..6212a3081116d988630706e83d2349dd200b73ab 100644
--- a/doc/v2/howto/capi/compile_paddle_lib_en.md
+++ b/doc/v2/howto/capi/compile_paddle_lib_en.md
@@ -1,3 +1,175 @@
 ## Install and Build
 
-TBD
+### Download & Install
+
+Download the latest C-API development package from the CI system and install it. You can find the required version in the table below:
+
+<table>
+<thead>
+<tr>
+<th>Version Tips</th>
+<th>C-API</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>cpu_avx_mkl</td>
+<td>paddle.tgz</td>
+</tr>
+<tr>
+<td>cpu_avx_openblas</td>
+<td>-</td>
+</tr>
+<tr>
+<td>cpu_noavx_openblas</td>
+<td>paddle.tgz</td>
+</tr>
+<tr>
+<td>cuda7.5_cudnn5_avx_mkl</td>
+<td>paddle.tgz</td>
+</tr>
+<tr>
+<td>cuda8.0_cudnn5_avx_mkl</td>
+<td>paddle.tgz</td>
+</tr>
+<tr>
+<td>cuda8.0_cudnn7_avx_mkl</td>
+<td>paddle.tgz</td>
+</tr>
+</tbody>
+</table>
+
+### From source
+
+Users can also build the C-API library from the PaddlePaddle source code by setting the following compilation options:
+
+<table>
+<thead>
+<tr>
+<th>Options</th>
+<th>Value</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>WITH_C_API</td>
+<td>ON</td>
+</tr>
+<tr>
+<td>WITH_PYTHON</td>
+<td>OFF(recommended)</td>
+</tr>
+<tr>
+<td>WITH_SWIG_PY</td>
+<td>OFF(recommended)</td>
+</tr>
+<tr>
+<td>WITH_GOLANG</td>
+<td>OFF(recommended)</td>
+</tr>
+<tr>
+<td>WITH_GPU</td>
+<td>ON/OFF</td>
+</tr>
+<tr>
+<td>WITH_MKL</td>
+<td>ON/OFF</td>
+</tr>
+</tbody>
+</table>
+
+It is best to use the recommended values to avoid linking unnecessary libraries. Set the other compilation options as you need.
+
+Clone the latest code from GitHub and configure the compilation options (replace PADDLE_ROOT with the installation path of the PaddlePaddle C-API inference library):
+
+```shell
+PADDLE_ROOT=/path/of/capi
+git clone https://github.com/PaddlePaddle/Paddle.git
+cd Paddle
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=$PADDLE_ROOT \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DWITH_C_API=ON \
+      -DWITH_SWIG_PY=OFF \
+      -DWITH_GOLANG=OFF \
+      -DWITH_PYTHON=OFF \
+      -DWITH_MKL=OFF \
+      -DWITH_GPU=OFF \
+      ..
+```
+
+After running the above code to generate the Makefile, run `make && make install`. After successful compilation, the dependencies required by the C-API (including (1) the PaddlePaddle inference library and header files, and (2) third-party libraries and header files) will be stored in the `PADDLE_ROOT` directory.
+
+If the compilation is successful, you will see the following directory structure under `PADDLE_ROOT` (PaddlePaddle header files and libraries, plus third-party libraries and header files, which are needed depending on the linking method):
+
+```text
+├── include
+│   └── paddle
+│       ├── arguments.h
+│       ├── capi.h
+│       ├── capi_private.h
+│       ├── config.h
+│       ├── error.h
+│       ├── gradient_machine.h
+│       ├── main.h
+│       ├── matrix.h
+│       ├── paddle_capi.map
+│       └── vector.h
+├── lib
+│   ├── libpaddle_capi_engine.a
+│   ├── libpaddle_capi_layers.a
+│   ├── libpaddle_capi_shared.so
+│   └── libpaddle_capi_whole.a
+└── third_party
+    ├── gflags
+    │   ├── include
+    │   │   └── gflags
+    │   │       ├── gflags_completions.h
+    │   │       ├── gflags_declare.h
+    │   │       ...
+    │   └── lib
+    │       └── libgflags.a
+    ├── glog
+    │   ├── include
+    │   │   └── glog
+    │   │       ├── config.h
+    │   │       ...
+    │   └── lib
+    │       └── libglog.a
+    ├── openblas
+    │   ├── include
+    │   │   ├── cblas.h
+    │   │   ...
+    │   └── lib
+    │       ...
+    ├── protobuf
+    │   ├── include
+    │   │   └── google
+    │   │       └── protobuf
+    │   │           ...
+    │   └── lib
+    │       └── libprotobuf-lite.a
+    └── zlib
+        ├── include
+        │   ...
+        └── lib
+            ...
+
+```
+
+### Linking Description:
+
+There are three kinds of linking methods:
+
+1. Linking with the dynamic library `libpaddle_capi_shared.so` (this is the most convenient method and is **recommended unless you have special requirements**):
+   1. When compiling the CPU version with `OpenBLAS`, you only need to link the single library `libpaddle_capi_shared.so` to develop a prediction program through the C-API.
+   1. When compiling the CPU version with `MKL`, you also need to link the MKL libraries directly, because `MKL` has its own dynamic libraries.
+   1. When compiling the GPU version, the CUDA library is loaded dynamically when the prediction program runs; you must also add the CUDA libraries to the `LD_LIBRARY_PATH` environment variable.
+
+2. Linking with the static library `libpaddle_capi_whole.a`:
+   1. Specify the `-Wl,--whole-archive` linking option.
+   1. Explicitly link third-party libraries such as `gflags`, `glog`, `libz` and `protobuf`; you can find them under the `PADDLE_ROOT/third_party` directory.
+   1. If the C-API was compiled with OpenBLAS, you must explicitly link `libopenblas.a`.
+   1. If the C-API was compiled with MKL, you must explicitly link the MKL dynamic libraries.
+
+3. Linking with the static libraries `libpaddle_capi_layers.a` and `libpaddle_capi_engine.a`:
+   1. This linking method is mainly used for mobile prediction.
+   1. `libpaddle_capi_whole.a` is split into at least two static libraries to reduce the size of the linked libraries.
+   1. Specify `-Wl,--whole-archive -lpaddle_capi_layers` and `-Wl,--no-whole-archive -lpaddle_capi_engine` for linking.
+   1. The third-party dependencies must be linked explicitly, the same as in method 2 above.
diff --git a/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md b/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
index bc3d50b3ffd3b703a3a656caa1f96bdcf683f68b..dee1b7554f97af17989c3f7739d8feea3b6b8e3f 100644
--- a/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
+++ b/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
@@ -1,3 +1,372 @@
-# Kubernetes Distributed
+# Distributed Training on Kubernetes
 
-TBD
+We introduced how to create a PaddlePaddle Job with a single node on Kubernetes in the
+previous document.
+In this article, we will introduce how to create a PaddlePaddle job with multiple nodes
+on a Kubernetes cluster.
+
+## Overall Architecture
+
+Before creating a training job, the users need to slice the training data and deploy
+the Python scripts along with it into the distributed file system
+(we can use different types of Kubernetes Volumes to mount different distributed
+file systems). Before training starts, the program copies the training data into the
+container, and it also saves the models at the same path during training. The global architecture
+is as follows:
+
+![PaddlePaddle on Kubernetes Architecture](src/k8s-paddle-arch.png)
+
+The above figure describes a distributed training architecture which contains 3 nodes; each
+Pod mounts a folder of the distributed file system to save training data and models
+by Kubernetes Volume. Kubernetes creates 3 Pods for this training phase and schedules them on
+3 nodes, and each Pod has a PaddlePaddle container. After the containers are created,
+PaddlePaddle starts up the communication between PServer and Trainer and reads the training
+data for this training job.
+
+As described above, we can start up a PaddlePaddle distributed training job on a
+Kubernetes-ready cluster with the following steps:
+
+1. [Build PaddlePaddle Docker Image](#Build a Docker Image)
+1. [Split training data and upload to the distributed file system](#Upload Training Data)
+1. [Edit a YAML file and create a Kubernetes Job](#Create a Job)
+1. [Check the output](#Check The Output)
+
+We will introduce these steps as follows:
+
+### Build a Docker Image
+
+The training Docker image needs to package the paddle pserver and paddle trainer runtimes, as well as two more processes before we can kick off the training:
+
+- Copying the training data into the container.
+- Generating the initialization arguments for the `Paddle PServer` and `Paddle Trainer` processes.
+
+Since the paddlepaddle official docker image already has the runtimes we need, we'll take it as the base image and pack some additional scripts for the processes mentioned above to build our training image. For more detail, please refer to the following link:
+- https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/src/k8s_train/Dockerfile
+
+
+```bash
+$ cd doc/howto/usage/k8s/src/k8s_train
+$ docker build -t [YOUR_REPO]/paddle:mypaddle .
+```
+
+And then push the new Docker image to a Docker registry:
+
+```bash
+docker push [YOUR_REPO]/paddle:mypaddle
+```
+
+**[NOTE]**: in the above commands, `[YOUR_REPO]` represents your Docker repository; you need to use your own repository name instead. Below, `[YOUR_REPO]/paddle:mypaddle` represents the Docker image built in this step.
+
+### Prepare Training Data
+
+We can download and split the training data by creating a Kubernetes Job, or customize the image by editing [k8s_train](./src/k8s_train/).
+
+Before creating a Job, we need to bind a [persistentVolumeClaim](https://kubernetes.io/docs/user-guide/persistent-volumes) of the type matching the underlying file system; the generated dataset will be saved on this volume.
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: paddle-data
+spec:
+  template:
+    metadata:
+      name: pi
+    spec:
+      hostNetwork: true
+      containers:
+      - name: paddle-data
+        image: paddlepaddle/paddle-tutorial:k8s_data
+        imagePullPolicy: Always
+        volumeMounts:
+        - mountPath: "/mnt"
+          name: nfs
+        env:
+        - name: OUT_DIR
+          value: /home/work/mfs/paddle-cluster-job
+        - name: SPLIT_COUNT
+          value: "3"
+      volumes:
+        - name: nfs
+          persistentVolumeClaim:
+            claimName: mfs
+      restartPolicy: Never
+```
+
+Create the Job with the following command:
+
+```bash
+> kubectl create -f xxx.yaml
+```
+
+If it is created successfully, you can see some information like this:
+
+```bash
+[root@paddle-kubernetes-node0 nfsdir]$ tree -d
+.
+`-- paddle-cluster-job
+    |-- 0
+    |   `-- data
+    |-- 1
+    |   `-- data
+    |-- 2
+    |   `-- data
+    |-- output
+    |-- quick_start
+```
+
+The `paddle-cluster-job` above is the job name for this training job; we need 3
+PaddlePaddle training nodes and save the split training data under the `paddle-cluster-job` path.
+The folders `0`, `1` and `2` represent the `training_id` of each node, the `quick_start` folder is used to store training data, and the `output` folder is used to store the models and logs.
+
+
+### Create a Job
+
+Kubernetes allows users to create objects with YAML files, and we can use a command-line tool
+to create them.
+
+The Job YAML file describes which Docker image is used in this training job, how many nodes are created, the startup arguments of the `Paddle PServer/Trainer` processes, and the type of Volumes. You can find the details of the YAML fields in
+[Kubernetes Job API](http://kubernetes.io/docs/api-reference/batch/v1/definitions/#_v1_job).
+The following is an example for this training job:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: paddle-cluster-job
+spec:
+  parallelism: 3
+  completions: 3
+  template:
+    metadata:
+      name: paddle-cluster-job
+    spec:
+      volumes:
+      - name: jobpath
+        hostPath:
+          path: /home/work/mfs
+      containers:
+      - name: trainer
+        image: [YOUR_REPO]/paddle:mypaddle
+        command: ["/bin/bash", "-c", "/root/start.sh"]
+        env:
+        - name: JOB_NAME
+          value: paddle-cluster-job
+        - name: JOB_PATH
+          value: /home/jobpath
+        - name: JOB_NAMESPACE
+          value: default
+        - name: TRAIN_CONFIG_DIR
+          value: recommendation
+        - name: CONF_PADDLE_NIC
+          value: eth0
+        - name: CONF_PADDLE_PORT
+          value: "7164"
+        - name: CONF_PADDLE_PORTS_NUM
+          value: "2"
+        - name: CONF_PADDLE_PORTS_NUM_SPARSE
+          value: "2"
+        - name: CONF_PADDLE_GRADIENT_NUM
+          value: "3"
+        volumeMounts:
+        - name: jobpath
+          mountPath: /home/jobpath
+      restartPolicy: Never
+```
+
+In the above YAML file:
+- `metadata.name`: the job name.
+- `parallelism`: the Kubernetes Job will create `parallelism` Pods at the same time.
+- `completions`: the Job reaches the success status only when the number of successful Pods (exit code 0)
+  equals `completions`.
+- `volumeMounts`: the name field `jobpath` is a key; the `mountPath` field represents
+  the path in the container. We can define `jobpath` in the `volumes` field and use `hostPath`
+  to configure the host path we want to mount.
+- `env`: the environment variables in the container. We pass some startup arguments by
+  this approach; some details are as follows:
+  - JOB_PATH: the mount path in the container.
+  - JOB_NAME: the job name.
+  - TRAIN_CONFIG_DIR: the job path in the container; we can find the training data path by
+    combining it with JOB_NAME.
+  - CONF_PADDLE_NIC: the argument `--nics` of the `Paddle PServer` process, the network
+    device name.
+  - CONF_PADDLE_PORT: the argument `--port` of the `Paddle PServer` process.
+  - CONF_PADDLE_PORTS_NUM: the argument `--ports_num` of `Paddle PServer`, the number of ports
+    for dense parameter updates.
+  - CONF_PADDLE_PORTS_NUM_SPARSE: the argument `--ports_num_for_sparse` of `Paddle PServer`,
+    the number of ports for sparse parameter updates.
+  - CONF_PADDLE_GRADIENT_NUM: the number of training nodes, the argument
+    `--num_gradient_servers` of `Paddle PServer` and `Paddle Trainer`.
+
+You can find more detailed information [here](http://www.paddlepaddle.org/docs/develop/documentation/zh/howto/usage/cmd_parameter/detail_introduction_cn.html).
+
+We can use the command-line tool of Kubernetes to create the Job once we finish the YAML file:
+
+```bash
+kubectl create -f job.yaml
+```
+
+Upon successful creation, Kubernetes creates 3 Pods as PaddlePaddle training nodes,
+pulls the Docker image, and begins to train.
+
+
+### Check the Output
+
+During training, we can check the logs and the output models, which are stored in
+the `output` folder.
+
+**NOTE**: `node_0`, `node_1` and `node_2` represent the
+`trainer_id` of the PaddlePaddle training job rather than the node ids of Kubernetes.
+
+```bash
+[root@paddle-kubernetes-node0 output]# tree -d
+.
+├── node_0
+│   ├── server.log
+│   └── train.log
+├── node_1
+│   ├── server.log
+│   └── train.log
+├── node_2
+......
+├── pass-00002
+│   ├── done
+│   ├── ___embedding_0__.w0
+│   ├── ___embedding_1__.w0
+......
+```
+
+We can check the status of each training Pod by viewing the logs:
+
+```bash
+[root@paddle-kubernetes-node0 node_0]# cat train.log
+I1116 09:10:17.123121    50 Util.cpp:155] commandline:
+ /usr/local/bin/../opt/paddle/bin/paddle_trainer
+    --nics=eth0 --port=7164
+    --ports_num=2 --comment=paddle_process_by_paddle
+    --pservers=192.168.129.66,192.168.223.143,192.168.129.71
+    --ports_num_for_sparse=2 --config=./trainer_config.py
+    --trainer_count=4 --num_passes=10 --use_gpu=0
+    --log_period=50 --dot_period=10 --saving_period=1
+    --local=0 --trainer_id=0
+    --save_dir=/home/jobpath/paddle-cluster-job/output
+I1116 09:10:17.123440    50 Util.cpp:130] Calling runInitFunctions
+I1116 09:10:17.123764    50 Util.cpp:143] Call runInitFunctions done.
+[WARNING 2016-11-16 09:10:17,227 default_decorators.py:40] please use keyword arguments in paddle config.
+[INFO 2016-11-16 09:10:17,239 networks.py:1282] The input order is [movie_id, title, genres, user_id, gender, age, occupation, rating]
+[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__square_error_cost_0__]
+I1116 09:10:17.392917    50 Trainer.cpp:170] trainer mode: Normal
+I1116 09:10:17.613910    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.680917    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.681543    50 GradientMachine.cpp:134] Initing parameters..
+I1116 09:10:18.012390    50 GradientMachine.cpp:141] Init parameters done.
+I1116 09:10:18.018641    50 ParameterClient2.cpp:122] pserver 0 192.168.129.66:7164
+I1116 09:10:18.018950    50 ParameterClient2.cpp:122] pserver 1 192.168.129.66:7165
+I1116 09:10:18.019069    50 ParameterClient2.cpp:122] pserver 2 192.168.223.143:7164
+I1116 09:10:18.019492    50 ParameterClient2.cpp:122] pserver 3 192.168.223.143:7165
+I1116 09:10:18.019716    50 ParameterClient2.cpp:122] pserver 4 192.168.129.71:7164
+I1116 09:10:18.019836    50 ParameterClient2.cpp:122] pserver 5 192.168.129.71:7165
+```
+
+## Some Additional Details
+
+### Using Environment Variables
+
+Usually we use environment variables to configure the PaddlePaddle Job which runs in
+Kubernetes. `start_paddle.py` provides a startup script that converts the environment variables
+into the startup arguments of the PaddlePaddle processes:
+
+```python
+API = "/api/v1/namespaces/"
+JOBSELECTOR = "labelSelector=job-name="
+JOB_PATH = os.getenv("JOB_PATH") + "/" + os.getenv("JOB_NAME")
+JOB_PATH_OUTPUT = JOB_PATH + "/output"
+JOBNAME = os.getenv("JOB_NAME")
+NAMESPACE = os.getenv("JOB_NAMESPACE")
+PADDLE_NIC = os.getenv("CONF_PADDLE_NIC")
+PADDLE_PORT = os.getenv("CONF_PADDLE_PORT")
+PADDLE_PORTS_NUM = os.getenv("CONF_PADDLE_PORTS_NUM")
+PADDLE_PORTS_NUM_SPARSE = os.getenv("CONF_PADDLE_PORTS_NUM_SPARSE")
+PADDLE_SERVER_NUM = os.getenv("CONF_PADDLE_GRADIENT_NUM")
+```
+
+### Communication between Pods
+
+At the beginning of `start_paddle.py`, it initializes and parses the arguments.
+
+```python
+parser = argparse.ArgumentParser(prog="start_paddle.py",
+                                 description='simple tool for k8s')
+args, train_args_list = parser.parse_known_args()
+train_args = refine_unknown_args(train_args_list)
+train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2]))
+podlist = getPodList()
+```
+
+It then queries the status of all the other Pods of this Job by the function `getPodList()`, and fetches the `trainer_id` by the function `getIdMap(podlist)` once all the Pods' status is `RUNNING`.
+
+```python
+    podlist = getPodList()
+    # need to wait until all pods are running
+    while not isPodAllRunning(podlist):
+        time.sleep(10)
+        podlist = getPodList()
+    idMap = getIdMap(podlist)
+```
+
+**NOTE**: `getPodList()` fetches all the Pods in the current namespace; if some
+Pods are already running, it may cause errors. We will use [statefulsets](https://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets) instead of
+Kubernetes Pods or Replicasets in the future.
+
+The function `getIdMap(podlist)` fetches the IP addresses from `podlist` and then sorts them
+to generate the `trainer_id` of each node.
+
+```python
+def getIdMap(podlist):
+    '''
+    generate trainer_id by ip
+    '''
+    ips = []
+    for pod in podlist["items"]:
+        ips.append(pod["status"]["podIP"])
+    ips.sort()
+    idMap = {}
+    for i in range(len(ips)):
+        idMap[ips[i]] = i
+    return idMap
+```
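+
+As a quick sanity check, here is what `getIdMap` produces for a hypothetical two-Pod job (the IPs are made up for this illustration):
+
+```python
+podlist = {"items": [
+    {"status": {"podIP": "192.168.129.71"}},
+    {"status": {"podIP": "192.168.129.66"}},
+]}
+print(getIdMap(podlist))
+# {'192.168.129.66': 0, '192.168.129.71': 1} -- ids follow the sorted IP order
+```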
+
+After getting the `idMap`, we can generate the arguments of `Paddle PServer` and `Paddle Trainer`, so that we can start them up by `startPaddle(idMap, train_args_dict)`.
+
+### Create Job
+
+The main goal of `startPaddle` is to generate the arguments of the `Paddle PServer` and
+`Paddle Trainer` processes. Take `Paddle Trainer` as an example: we parse the
+environment variables to get `PADDLE_NIC`, `PADDLE_PORT`, `PADDLE_PORTS_NUM` and so on,
+and finally find the `trainerId` from `idMap` according to the Pod's IP address.
+
+```python
+    program = 'paddle train'
+    args = " --nics=" + PADDLE_NIC
+    args += " --port=" + str(PADDLE_PORT)
+    args += " --ports_num=" + str(PADDLE_PORTS_NUM)
+    args += " --comment=" + "paddle_process_by_paddle"
+    ip_string = ""
+    for ip in idMap.keys():
+        ip_string += (ip + ",")
+    ip_string = ip_string.rstrip(",")
+    args += " --pservers=" + ip_string
+    args_ext = ""
+    for key, value in train_args_dict.items():
+        args_ext += (' --' + key + '=' + value)
+    localIP = socket.gethostbyname(socket.gethostname())
+    trainerId = idMap[localIP]
+    args += " " + args_ext + " --trainer_id=" + \
+        str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT
+```
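+
+With hypothetical values plugged in (two pservers, local IP 192.168.129.66), the assembled command line would look roughly like this:
+
+```python
+# illustration only -- these values stand in for the environment variables above
+PADDLE_NIC, PADDLE_PORT, PADDLE_PORTS_NUM = "eth0", 7164, 2
+idMap = {"192.168.129.66": 0, "192.168.223.143": 1}
+
+args = " --nics=" + PADDLE_NIC
+args += " --port=" + str(PADDLE_PORT)
+args += " --ports_num=" + str(PADDLE_PORTS_NUM)
+args += " --pservers=" + ",".join(sorted(idMap.keys()))
+args += " --trainer_id=" + str(idMap["192.168.129.66"])
+print("paddle train" + args)
+# paddle train --nics=eth0 --port=7164 --ports_num=2 --pservers=192.168.129.66,192.168.223.143 --trainer_id=0
+```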
diff --git a/doc/v2/howto/cmd_parameter/index_en.rst b/doc/v2/howto/cmd_parameter/index_en.rst
index 0e3c72d27aca063f1b6f1c23e55718dba373c40a..f49683948ef78f363e2439cc25332431830eeb24 100644
--- a/doc/v2/howto/cmd_parameter/index_en.rst
+++ b/doc/v2/howto/cmd_parameter/index_en.rst
@@ -2,10 +2,25 @@ Set Command-line Parameters
 ===========================
 
+The implementation of deep learning algorithms varies in many respects, such as the running environment, the running stage, the structure of the model, and the training strategy. PaddlePaddle allows the user to set various command-line parameters flexibly, which helps to control the model training or prediction process.
+
+In this part, we take several practical scenarios as examples to show the use of some command-line parameters:
 
 .. toctree::
   :maxdepth: 1
 
   use_case_en.md
+
+Then, we summarize and classify the use of all command-line parameters:
+
+.. toctree::
+  :maxdepth: 1
+
+  arguments_en.md
+
+Finally, detailed descriptions are given, and we explain the properties and significance of these command-line parameters in detail:
+
+.. toctree::
+  :maxdepth: 1
+
+  detail_introduction_en.md
diff --git a/doc/v2/howto/rnn/recurrent_group_en.md b/doc/v2/howto/rnn/recurrent_group_en.md
index d264b0a9f85faffd49c1982117cb5a3ac6ffc015..de6b60f29eb97029a54609cd2194bb7faf3ffec5 100644
--- a/doc/v2/howto/rnn/recurrent_group_en.md
+++ b/doc/v2/howto/rnn/recurrent_group_en.md
@@ -1,3 +1,96 @@
 # Recurrent Group Tutorial
 
-TBD
+## Overview
+
+Sequential data is common in natural language processing.
+
+A sentence is a sequence of words, and a sequence of sentences further forms a paragraph. Therefore, a paragraph can be viewed as a nested sequence with two levels, where each element of the sequence is another sequence. That is to say, sequential data can be recursive. An example of two-level recursive sequential data is an article composed of a sequence of sentences, with each sentence a sequence of words.
+
+PaddlePaddle and PaddlePaddle v2 support two-level recursive sequential data. A two-level sequence is a very flexible kind of data, which helps us better describe more complex language data, such as paragraphs and multi-round dialogues. Based on two-level sequence input, we can design and build a flexible, hierarchical RNN model that encodes input data at both the word and sentence level. For the support of arbitrary levels, please refer to PaddlePaddle Fluid.
+
+In PaddlePaddle, `recurrent_group` is an arbitrarily complex RNN unit. The user only needs to define the calculation that the RNN completes in one time step. PaddlePaddle is responsible for the propagation of information and errors through the time series.
+
+Furthermore, `recurrent_group` can also be extended to handle two-level sequences. By defining two nested `recurrent_group` operations at the clause level and the word level respectively, a hierarchical and complex RNN is achieved.
+
+Currently, PaddlePaddle provides `recurrent_group` and some Layers that can process bidirectional sequences. For details, refer to the document: Layers for supporting double-layer sequences as input.
+
+## Related Concepts
+
+### Basic Principle
+`recurrent_group` is an arbitrarily complex RNN unit supported by PaddlePaddle. The user only needs to focus on the calculations that the RNN is designed to complete within a single time step. PaddlePaddle is responsible for completing the propagation of information and gradients over time.
+
+In PaddlePaddle, a simple call to `recurrent_group` is as follows:
+
+``` python
+recurrent_group(step, input, reverse)
+```
+- step: a callable function that defines the calculations completed by the RNN unit within a time step
+- input: the input, which must be a single-level sequence or a two-level sequence
+- reverse: whether to process the input sequence in reverse order
+
+The core of using `recurrent_group` is to design the logic of the step function. The step function can freely combine various layers supported by PaddlePaddle to complete arbitrary computational logic. The input of `recurrent_group` becomes the input of the step function. Since the step function only focuses on the calculation within one time step of the RNN, `recurrent_group` completes the splitting of the original input data for us (a minimal sketch follows the list of input types below).
+
+### Input
+The input sequences processed by `recurrent_group` are mainly divided into the following three types:
+
+- **Input Data**: When a two-level sequence is put into `recurrent_group`, it is disassembled into single-level sequences. When a single-level sequence is put into `recurrent_group`, it is disassembled into non-sequences and then passed to the step function. This process is completely transparent to the user. There are two possible types: 1) user input via data_layer; 2) output from other layers.
+
+- **Read-only Memory Input**: `StaticInput` defines a read-only Memory. The input specified by `StaticInput` is not disassembled by `recurrent_group`, and every time step of the `recurrent_group` loop can always reference the whole input. It may be a non-sequence or a single-level sequence.
+
+- **Input of Sequence Generation Task**: `GeneratedInput` is only used to specify input data in a sequence generation task.
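+
+Putting these pieces together, here is a minimal sketch of a `recurrent_group` call (all names and sizes are hypothetical; the encoder vector is faked with a plain data layer and passed in as a read-only `StaticInput`):
+
+```python
+import paddle.v2 as paddle
+
+dict_dim, emb_dim, hidden_dim = 5000, 64, 128  # hypothetical sizes
+
+word = paddle.layer.data(
+    name='word', type=paddle.data_type.integer_value_sequence(dict_dim))
+word_emb_seq = paddle.layer.embedding(input=word, size=emb_dim)
+# stand-in for some fixed encoder output, e.g. the last state of an encoder RNN
+encoder_vec = paddle.layer.data(
+    name='enc', type=paddle.data_type.dense_vector(hidden_dim))
+
+def step(word_emb, enc_vec):
+    # previous time-step output of the layer named 'decoder_state'
+    prev = paddle.layer.memory(name='decoder_state', size=hidden_dim)
+    # combine the current word, the static encoder vector and the state;
+    # naming this fc layer 'decoder_state' closes the recurrence
+    return paddle.layer.fc(input=[word_emb, enc_vec, prev],
+                           size=hidden_dim,
+                           act=paddle.activation.Tanh(),
+                           name='decoder_state')
+
+decoder = paddle.layer.recurrent_group(
+    step=step,
+    input=[word_emb_seq,                           # a single-level sequence
+           paddle.layer.StaticInput(encoder_vec)]) # read-only memory input
+```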
+
+### Input Example
+
+Sequence generation tasks mostly follow the encoder-decoder architecture. The encoder and decoder can be arbitrary neural network units capable of processing sequences, and RNN is the most popular choice.
+
+Given the encoder output and the current word, the decoder predicts the next most likely word at each step. In this structure, the decoder accepts two inputs:
+
+- The target sequence to be generated: an input of the decoder and the basis of the decoder loop. `recurrent_group` will disassemble this input type.
+
+- The encoder output, a non-sequence or a single-level sequence: an unbounded memory. Each time step in the decoder loop will reference the entire result, so it should not be disassembled. This type of input must be specified via `StaticInput`. For more discussion on unbounded memory, please refer to the paper [Neural Turing Machines](https://arxiv.org/abs/1410.5401).
+
+In a sequence generation task, the decoder RNN always takes the word vector of the word predicted at the previous moment as the input of the current time step. `GeneratedInput` automates this process.
+
+### Output
+The `step` function must return the output of one or more Layers. The output of this Layer will be the final output of the entire `recurrent_group`. In the output process, `recurrent_group` concatenates the output of each time step, which is also transparent to the user.
+
+### Memory
+Memory can only be defined and used in `recurrent_group`. Memory cannot exist independently and must point to a layer defined by PaddlePaddle. Memory is referenced to get the output of that layer at a previous moment, so memory can be interpreted as a delay operation.
+
+The user can explicitly specify the output of a layer to initialize the memory. When not specified, the memory is initialized to 0 by default.
+
+## Sequence-level RNN Introduction
+
+`recurrent_group` helps us to split the input sequence, merge the outputs, and loop through the sequence of computational logic.
+
+Using this feature, two nested `recurrent_group` calls can handle nested two-level sequences, implementing sequence-level RNN structures at both the word and sentence levels (a sketch of the nesting follows at the end of this section).
+
+- Word-level RNN: each state corresponds to a word.
+- Sequence-level RNN: a sequence-level RNN consists of multiple word-level RNNs. Each word-level RNN (i.e. each state of the sequence-level RNN) has a subsequence as input.
+
+For convenience of description, the following takes an NLP task as an example. A paragraph containing subsequences is defined as a two-level sequence, and a sentence containing words is defined as a single-level sequence. Then, a zero-level sequence is a word.
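+
+A minimal sketch of the nesting described above (all names and sizes are hypothetical; the outer group receives the two-level sequence, while the inner group runs a word-level RNN over each subsequence):
+
+```python
+import paddle.v2 as paddle
+
+dict_dim, emb_dim, hidden_dim = 5000, 64, 128  # hypothetical sizes
+
+paragraph = paddle.layer.data(
+    name='paragraph',
+    type=paddle.data_type.integer_value_sub_sequence(dict_dim))
+emb = paddle.layer.embedding(input=paragraph, size=emb_dim)
+
+def inner_step(word):
+    # word-level RNN over one sentence
+    prev = paddle.layer.memory(name='word_state', size=hidden_dim)
+    return paddle.layer.fc(input=[word, prev], size=hidden_dim,
+                           act=paddle.activation.Tanh(), name='word_state')
+
+def outer_step(sentence):
+    # each outer step sees one subsequence (a sentence)
+    rnn = paddle.layer.recurrent_group(step=inner_step, input=sentence)
+    return paddle.layer.last_seq(input=rnn)
+
+# SubsequenceInput feeds the two-level sequence to the outer group
+seq_rnn = paddle.layer.recurrent_group(
+    step=outer_step, input=paddle.layer.SubsequenceInput(emb))
+```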
+
+## Usage of Sequence-level RNN
+
+### Usage of Training Process
+Using `recurrent_group` requires the following conventions:
+
+- **Single-input Single-output**: both input and output are single-level sequences.
+  - If there are multiple inputs, the number of words in the different input sequences must be exactly equal.
+  - A single-level sequence is output, and the number of words in the output sequence is the same as in the input sequence.
+  - memory: define a memory in the step function that points to a layer; by referencing the memory we get that layer's output at the previous moment, forming a recurrent connection. The is_seq parameter of the memory must be false. If no memory is defined, the operations within each time step are independent.
+  - boot_layer: the initial state of the memory, 0 by default. is_seq in the memory must be false.
+
+- **Double-input Double-output**: both input and output are two-level sequences.
+  - If there are multiple input sequences, the number of subsequences contained in the different inputs must be strictly equal, but the number of words in the subsequences may differ.
+  - A two-level sequence is output. The number of subsequences and the number of words are the same as in the specified input sequence, which is the first input by default.
+  - memory: define a memory in the step function that points to a layer; by referring to the memory we get that layer's output at the previous moment, forming a recurrent connection. A memory defined in the outer `recurrent_group` step function can record the state of the previous subsequence, either as a single-level sequence (only as read-only memory) or as a word. If no memory is defined, the operations between subsequences are independent.
+  - boot_layer: the initial state of the memory. It is either a single-level sequence (only as read-only memory) or a vector. By default it is not set, that is, the initial state is 0.
+
+- **Double-input Single-output**: not supported for now; PaddlePaddle raises the error "In hierachical RNN, all out links should be from sequences now".
+
+### Usage of Generation Process
+Using `beam_search` requires the following conventions:
+
+- Word-level RNN: generate the next word from a word.
+- Sequence-level RNN: the subsequences generated by the word-level RNN are concatenated into a new two-level sequence. Semantically, there is no case where a subsequence directly generates the next subsequence.
diff --git a/paddle/.gitignore b/paddle/.gitignore
index f921eef14156a97e4fd250f014960e306b43f35a..1c1c0c2c829f088d7e3f52ca007fcb8f33a16a36 100644
--- a/paddle/.gitignore
+++ b/paddle/.gitignore
@@ -1,3 +1,4 @@
+.timestamp
 *.o
 *.a
 .svn
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index d2a4b1335464f553a361728e64ed5ca177ca53da..8b1ca5e16548334ed0c9a6d31b88e0805304579e 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(NOT WITH_FLUID)
+if(NOT WITH_FLUID_ONLY)
   add_subdirectory(cuda)
   add_subdirectory(function)
   add_subdirectory(utils)
@@ -24,6 +24,6 @@ if(NOT WITH_FLUID)
 endif()
 add_subdirectory(testing)
-if(NOT MOBILE_INFERENCE AND NOT ANDROID AND NOT IOS)
+if(NOT MOBILE_INFERENCE AND NOT RPI)
   add_subdirectory(fluid)
 endif()
diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt
index cf84568ecdf1227b0d0ed3606a4a9a6e5186af72..06e1f5d5f0884efabfcdf917ca5c35d94ad5dce9 100644
--- a/paddle/api/CMakeLists.txt
+++ b/paddle/api/CMakeLists.txt
@@ -89,16 +89,17 @@ SWIG_LINK_LIBRARIES(swig_paddle
     ${START_END}
 )
-add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/paddle/py_paddle/_swig_paddle.so
-  COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_SOURCE_DIR}/paddle/py_paddle
-  COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_SOURCE_DIR}/paddle/py_paddle
-  COMMAND ${CMAKE_COMMAND} -E touch .timestamp
+add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so
+  COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/py_paddle
+  COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_BINARY_DIR}/python/py_paddle
+  COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_BINARY_DIR}/python/py_paddle
+  COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/.timestamp
   WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle
   DEPENDS _swig_paddle
 )
 # TODO(yuyang18) : make wheel name calculated by cmake
-add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_SOURCE_DIR}/paddle/py_paddle/_swig_paddle.so)
+add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so)
 if(WITH_TESTING)
   IF(NOT PY_PIP_FOUND)
diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt
index
761aeb5b174105edece8880a9f5012c13a63fd11..13cb79129cc2272d215cdb475fb146b37266699e 100644
--- a/paddle/api/test/CMakeLists.txt
+++ b/paddle/api/test/CMakeLists.txt
@@ -1,3 +1,8 @@
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/testTrain.py
+  COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/*.py ${CMAKE_CURRENT_BINARY_DIR}
+)
+add_custom_target(copy_api_test ALL DEPENDS testTrain.py)
+
 py_test(testTrain SRCS testTrain.py)
 py_test(testMatrix SRCS testMatrix.py)
 py_test(testVector SRCS testVector.py)
diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h
index 63ec51564793ca2255032d0efbe2c47326f8b698..b790fa39fe863bbb00f6cd36d4c63481b7634fe1 100644
--- a/paddle/cuda/include/hl_cnn.h
+++ b/paddle/cuda/include/hl_cnn.h
@@ -370,4 +370,48 @@ extern void hl_maxout_backward(real* inGrad,
                                size_t featLen,
                                size_t groups);
+/**
+ * @brief Upsample forward.
+ * @param[in] inputData input data.
+ * @param[in] maskData the mask data from MaxPoolWithMaskLayer.
+ * @param[in] batchSize the batch size of the input.
+ * @param[in] imgSizeH image height.
+ * @param[in] imgSizeW image width.
+ * @param[in] channels the input channels.
+ * @param[in] outputH the output height.
+ * @param[in] outputW the output width.
+ * @param[out] outputData output data.
+ */
+extern void hl_upsample_forward(real* inputData,
+                                real* maskData,
+                                size_t batchSize,
+                                size_t imgSizeH,
+                                size_t imgSizeW,
+                                size_t channels,
+                                size_t outputH,
+                                size_t outputW,
+                                real* outputData);
+
+/**
+ * @brief Upsample backward.
+ * @param[in] outputGradData the output grad data.
+ * @param[in] maskData the mask data from MaxPoolWithMaskLayer.
+ * @param[in] batchSize the batch size of the input.
+ * @param[in] imgSizeH image height.
+ * @param[in] imgSizeW image width.
+ * @param[in] channels the input channels.
+ * @param[in] outputH the output height.
+ * @param[in] outputW the output width.
+ * @param[out] inputGradData the input grad data.
+ */ +extern void hl_upsample_backward(real* outputGradData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData); + #endif // HL_CNN_H_ diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index c39bd3228d3f2ea7495cd21f5ff60bdfbbd2b51d..997eed62e07827f375c7441554b397fdd0bd6a80 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -224,4 +224,24 @@ inline void hl_maxout_backward(real* inGrad, size_t featLen, size_t group) {} +inline void hl_upsample_forward(real* inputData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* outputData) {} + +inline void hl_upsample_backward(real* outputGradData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData) {} + #endif // HL_CNN_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index a4459243e8a7c8be58be2255faf89e29817fbdf5..bac743a293cc97b114281e510d06367a86536452 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -1028,3 +1028,79 @@ void hl_maxout_backward(real* inGrad, num_kernels, inGrad, outGrad, idData, size, featLen, groups); CHECK_SYNC("hl_maxout_backward failed"); } + +__global__ void upsampleForwardCompute(real* input_data, + real* mask_data, + size_t nthreads, + size_t in_h, + size_t in_w, + size_t out_h, + size_t out_w, + real* output_data) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offset = index / (in_w * in_h) * out_h * out_w; + int upsample_idx = static_cast(mask_data[index]); + output_data[offset + upsample_idx] = input_data[index]; + } +} + +__global__ void upsampleBackwardCompute(real* out_grad, + real* mask_data, + size_t nthreads, + size_t in_h, + size_t in_w, + size_t out_h, + size_t out_w, + real* input_grad) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offset = index / (in_w * in_h) * out_h * out_w; + int upsample_idx = static_cast(mask_data[index]); + input_grad[index] = out_grad[offset + upsample_idx]; + } +} + +void hl_upsample_forward(real* inputData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* outputData) { + int num_kernels = batchSize * imgSizeH * imgSizeW * channels; + int blocks = (num_kernels + 1024 - 1) / 1024; + upsampleForwardCompute<<>>(inputData, + maskData, + num_kernels, + imgSizeH, + imgSizeW, + outputH, + outputW, + outputData); + CHECK_SYNC("hl_upsample_forward failed"); +} + +void hl_upsample_backward(real* outputGradData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData) { + int num_kernels = batchSize * imgSizeH * imgSizeW * channels; + int blocks = (num_kernels + 1024 - 1) / 1024; + upsampleBackwardCompute<<>>(outputGradData, + maskData, + num_kernels, + imgSizeH, + imgSizeW, + outputH, + outputW, + inputGradData); + CHECK_SYNC("hl_upsample_backward failed"); +} diff --git a/paddle/fluid/framework/.clang-format b/paddle/fluid/.clang-format similarity index 100% rename from paddle/fluid/framework/.clang-format rename to paddle/fluid/.clang-format diff --git a/paddle/fluid/CMakeLists.txt 
b/paddle/fluid/CMakeLists.txt index d725763b01d5953985f8e090605f68a8419b5498..d274d96c29bdbf5973d568d783369c3975bdc436 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(platform) add_subdirectory(framework) add_subdirectory(operators) add_subdirectory(pybind) -add_subdirectory(inference) add_subdirectory(string) add_subdirectory(recordio) +# NOTE: please add subdirectory inference at last. +add_subdirectory(inference) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index a4ea74a6d2fbc29dc33a6b57ee453f49ed36c7fa..1f3ca24df16cf080d325fbdc0d613a828e384b2a 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -1,3 +1,4 @@ +add_subdirectory(details) # ddim lib proto_library(framework_proto SRCS framework.proto) @@ -6,9 +7,9 @@ cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) nv_test(dim_test SRCS dim_test.cu DEPS ddim) if(WITH_GPU) - nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place paddle_memory device_context framework_proto) + nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place memory device_context framework_proto) else() - cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place paddle_memory device_context framework_proto) + cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place memory device_context framework_proto) endif() cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) @@ -20,9 +21,9 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) -nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place paddle_memory device_context init) +nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context init) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio) -cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) +cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) @@ -73,19 +74,20 @@ py_proto_compile(framework_py_proto SRCS framework.proto) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_dependencies(framework_py_proto framework_py_proto_init) add_custom_command(TARGET framework_py_proto POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto - COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto + COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/ COMMENT "Copy generated python proto into directory paddle/fluid/proto." 
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) -cc_library(backward SRCS backward.cc DEPS net_op) -cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context fill_constant_op) cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor) cc_library(feed_fetch_method SRCS feed_fetch_method.cc DEPS lod_tensor scope glog) cc_library(executor SRCS executor.cc DEPS op_registry device_context scope -framework_proto backward glog lod_rank_table feed_fetch_method) +framework_proto glog lod_rank_table feed_fetch_method) + + +cc_library(parallel_executor SRCS parallel_executor.cc DEPS multi_devices_graph_builder threaded_ssa_graph_executor) cc_library(prune SRCS prune.cc DEPS framework_proto) cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context) diff --git a/paddle/fluid/framework/backward.cc b/paddle/fluid/framework/backward.cc deleted file mode 100644 index 1314af2b3dab281bd201e6a77bfbe87e0bd58ffb..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward.cc +++ /dev/null @@ -1,585 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/framework/backward.h" -#include "paddle/fluid/operators/net_op.h" - -#include -#include -#include -#include - -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/net_op.h" - -namespace paddle { -namespace framework { - -static std::unordered_set* g_ctrl_flow_ops_ = nullptr; -// Control Flow operators's backward is significantly different from -// computational operators. Hack Code here. -// We should design a better way to backward CtrlFlowOps. 
-static std::unordered_set& CtrlFlowOps() { - if (g_ctrl_flow_ops_ == nullptr) { - g_ctrl_flow_ops_ = new std::unordered_set{ - "increment", "lod_rank_table", "less_than"}; - } - return *g_ctrl_flow_ops_; -} - -static inline std::unique_ptr CreateGradOp( - const OperatorBase& op, const std::unordered_set& no_grad_set, - std::unordered_map* grad_to_var) { - OpDesc op_desc; - op_desc.SetInputMap(op.Inputs()); - op_desc.SetOutputMap(op.Outputs()); - op_desc.SetType(op.Type()); - op_desc.SetAttrMap(op.Attrs()); - auto& info = OpInfoMap::Instance().Get(op.Type()); - auto grad_descs = info.GradOpMaker()(op_desc, no_grad_set, grad_to_var, {}); - std::vector> grad_ops; - grad_ops.reserve(grad_descs.size()); - std::transform(grad_descs.begin(), grad_descs.end(), - std::back_inserter(grad_ops), - [](const std::unique_ptr& grad_desc) { - return OpRegistry::CreateOp(*grad_desc); - }); - PADDLE_ENFORCE(!grad_ops.empty()); - if (grad_ops.size() == 1) { - return std::move(grad_ops[0]); - } else { - auto net_op = new operators::NetOp(); - for (auto& grad_op : grad_ops) { - net_op->AppendOp(std::move(grad_op)); - } - net_op->CompleteAddOp(); - return std::unique_ptr(net_op); - } -} - -template -static void ForEachVarName(const Map& names, T callback) { - for (auto& name : names) { - for (auto& n : name.second) { - if (callback(n)) return; - } - } -} - -// return whether all the names + suffixes in the set -static bool AllInSet( - const std::map>& names, - const std::string& suffix, const std::unordered_set& set) { - bool all_in_set = true; - ForEachVarName(names, [&all_in_set, &set, &suffix](const std::string& n) { - all_in_set = set.find(n + suffix) != set.end(); - return !all_in_set; - }); - return all_in_set; -} - -static std::unique_ptr NOP() { - auto net_op = new operators::NetOp(); - net_op->SetType("@NOP@"); - net_op->CompleteAddOp(); - return std::unique_ptr(net_op); -} - -// Get backward operator from a forward operator, a recursive implementation. -// -// no_grad_names the gradient variable names without gradient calculating. -// -// uniq_id is a unique index used inside recursively calling -// BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and -// pass `uniq_id` through recursive calling. -// -// returns The backward operator. In a simple situation, it may be a simple -// operator, in a complex situation, it maybe a NetOp. -// -// See Backward.h for details -static std::unique_ptr BackwardRecursive( - const OperatorBase& forwardOp, - std::unordered_set& no_grad_names, - std::unordered_map* grad_to_var, - size_t& uniq_id) { - // If all input gradients of forwarding operator do not need to calculate, - // just return an NOP. Not return null ptr because NOP does not take - // too much time for calculation, but it is useful for simplifying logic. - if (AllInSet(forwardOp.Inputs() /*names*/, kGradVarSuffix /*suffix*/, - no_grad_names /*set*/)) { - return NOP(); - } - - // All output gradients of forwarding operator do not need to calculate. - // Then all input gradients cannot be computed at all, and we put them into - // `no_grad_names` set. Return an NOP. 
- if (AllInSet(forwardOp.Outputs() /*names*/, kGradVarSuffix /*suffix*/, - no_grad_names /*set*/)) { - ForEachVarName(forwardOp.Inputs(), - [&no_grad_names](const std::string& name) -> bool { - no_grad_names.insert(GradVarName(name)); - return false; - }); - return NOP(); - } - - // Returned gradient network - auto net = std::unique_ptr(new operators::NetOp()); - - if (forwardOp.IsNetOp()) { - // Because forwardOp is a net op, it can static_cast. - auto& forwardNet = static_cast(forwardOp); - - // Map from output gradient variable name to operator's indices in - // backward net's ops_. That operator generates that variable. - std::unordered_map> dup_output_ops; - - size_t local_op_id = 0; - // reversely travel forwardNet and collect all duplicate outputs. - for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); - ++it, ++local_op_id) { - auto& fwd = *it; - auto bwd = BackwardRecursive(*fwd, no_grad_names, grad_to_var, uniq_id); - ForEachVarName(bwd->Outputs(), - [&dup_output_ops, local_op_id](const std::string& out) { - dup_output_ops[out].emplace_back(local_op_id); - return false; - }); - net->AppendOp(std::move(bwd)); - } - // Get unique ID for this method. - auto uid = uniq_id++; - // TODO(dzh): more comment - // multiple operators which have the same output (y for example) may - // overwrite the same y variable when backward, special operations are token - // to handle this case. For each duplicate output, rename it to an alias - // (original name with a offset), append an `add` op for its operator, - // and finally sum all the alias variable to the final output variable y. - using Pos = std::pair>; - std::list insert_position; - for (auto& dup_output_op : dup_output_ops) { - const std::string& name = dup_output_op.first; - // duplicate @Empty@ don't need to be added - if (name == kEmptyVarName) continue; - - auto& dup_op = dup_output_op.second; - // no duplicate output - if (dup_op.size() == 1) continue; - - // process the duplicate outputs - std::vector dup_outputs; - for (size_t i = 0; i < dup_op.size(); ++i) { - // rename each duplicate output to an alias - auto op_offset = dup_op[i]; - dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + - std::to_string(i)); - net->ops_[op_offset]->Rename(name, dup_outputs.back()); - } - // collect all the offset for each alias, - // insert a sum operator to add all aliases to output - insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp("sum", {{"X", dup_outputs}}, {{"Out", {name}}}, - AttributeMap{})}); - } - - // make sure the inserted `sum` ops follow the BFS order. - insert_position.sort( - [](const Pos& l, const Pos& r) { return l.first > r.first; }); - - for (auto& pos : insert_position) { - net->InsertOp(pos.first + 1, std::move(pos.second)); - } - } else { - std::unique_ptr grad_op( - CreateGradOp(forwardOp, no_grad_names, grad_to_var)); - - ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( - const std::string& grad_input) { - if (no_grad_names.count(grad_input)) { - // +1 for \0 - std::string prefix = grad_input.substr( - 0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); - grad_op->Rename(grad_input, prefix + kZeroVarSuffix); - - // If part of input gradient of that operator is not calculated, fill - // zero variables to that input gradient. 
- net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}}, - {{"Out", {grad_input}}}, - AttributeMap{})); - } - return false; - }); - - ForEachVarName(grad_op->Outputs(), - [&no_grad_names, &grad_op](const std::string& grad_output) { - if (no_grad_names.count(grad_output)) { - grad_op->Rename(grad_output, kEmptyVarName); - } - return false; - }); - - if (net->ops_.empty()) { // Current no aux op is added to network - return grad_op; - } - net->AppendOp(std::move(grad_op)); - } - net->SetType("@GENERATED_BACKWARD@"); - net->CompleteAddOp(); - return std::unique_ptr( - static_cast(net.release())); -} - -// See header for comments -std::unique_ptr Backward( - const OperatorBase& forwardOp, - const std::unordered_set& no_grad_vars) { - std::unordered_set no_grad_names; - no_grad_names.reserve(no_grad_vars.size() + 1); - - no_grad_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); - - for (auto& name : no_grad_vars) { - no_grad_names.insert(name + kGradVarSuffix); - } - size_t uid = 0; - std::unordered_map grad_to_var; - return BackwardRecursive(forwardOp, no_grad_names, &grad_to_var, uid); -} - -// ==================================== // - -static bool AllGradInSet(const std::vector& names, - const std::unordered_set& set) { - for (const std::string& name : names) { - if (!set.count(GradVarName(name))) { - return false; - } - } - if (VLOG_IS_ON(10)) { - std::ostringstream sout; - sout << "All input {"; - for (auto& name : names) { - sout << name << ","; - } - sout << "} is in {"; - for (auto& name : set) { - sout << name << ","; - } - sout << "}"; - VLOG(10) << sout.str(); - } - return true; -} - -static std::string FwdName(const std::string& grad_name) { - auto pos = grad_name.find("@GRAD"); - if (pos == std::string::npos) { - return ""; - } else { - return grad_name.substr(0, pos); - } -} - -static void CreateGradVarInBlock( - size_t grad_op_start_index, - const std::unordered_map& param_name_map, - BlockDesc* block_desc, - std::unordered_map* grad_var_record) { - auto ops = block_desc->AllOps(); - for (size_t op_index = grad_op_start_index; op_index < ops.size(); - ++op_index) { - std::unordered_set new_vars; - auto& ctrl_flow_ops = CtrlFlowOps(); - ForEachVarName(ops[op_index]->Outputs(), - [&](const std::string& grad_var_name) { - if (ctrl_flow_ops.find(ops[op_index]->Type()) != - ctrl_flow_ops.end()) { - if (block_desc->HasVarRecursive(grad_var_name)) { - return false; - } - } else { - if (block_desc->HasVar(grad_var_name)) { - return false; - } - } - if (grad_var_name == framework::kEmptyVarName) { - return false; - } - auto var = block_desc->Var(grad_var_name); - VLOG(10) << "Creating Variable " << grad_var_name; - new_vars.insert(var->Name()); - auto it = param_name_map.find(grad_var_name); - if (it == param_name_map.end()) { - return false; - } - auto param_var_name = it->second; - auto& grad_record = (*grad_var_record)[param_var_name]; - grad_record.name_ = grad_var_name; - grad_record.block_idx_ = block_desc->ID(); - grad_record.op_idx_ = static_cast(op_index); - return false; /* not break */ - }); - ops[op_index]->InferVarType(block_desc); - for (auto& arg : ops[op_index]->OutputArgumentNames()) { - if (new_vars.find(arg) == new_vars.end()) { - continue; - } - auto pname = FwdName(arg); - auto* param = block_desc->FindVarRecursive(pname); - auto* grad = block_desc->FindVar(arg); - if (param == nullptr) { - grad->SetDataType(proto::VarType::FP32); - } else { - grad->SetDataType(param->GetDataType()); - } - } - ops[op_index]->InferShape(*block_desc); - } -} - 
-std::vector> MakeOpGrad( - const OpDesc* op_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - const std::vector& grad_block = std::vector()) { - std::vector> grad_op_descs; - // All input gradients of forwarding operator do not need to calculate. - const std::vector& inputs = op_desc->InputArgumentNames(); - if (AllGradInSet(inputs, *no_grad_vars)) { - VLOG(10) << "Drop operator " << op_desc->Type(); - return grad_op_descs; // empty vector - } - - // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc->OutputArgumentNames(); - - if (AllGradInSet(outputs, *no_grad_vars)) { - VLOG(10) << "Drop operator " << op_desc->Type(); - // FIXME: Hack code here - auto& ctrl_flow_ops = CtrlFlowOps(); - if (ctrl_flow_ops.find(op_desc->Type()) == ctrl_flow_ops.end()) { - // Only computational op need drop input's gradient. - for (const std::string& name : inputs) { - no_grad_vars->insert(GradVarName(name)); - VLOG(10) << " Also drop " << GradVarName(name); - } - } - - return grad_op_descs; // empty vector - } - - grad_op_descs = - OpInfoMap::Instance() - .Get(op_desc->Type()) - .GradOpMaker()(*op_desc, *no_grad_vars, grad_to_var, grad_block); - - std::list> pending_fill_zeros_ops; - for (auto& desc : grad_op_descs) { - for (const std::string& in_name : desc->InputArgumentNames()) { - if (no_grad_vars->count(in_name)) { - std::string prefix = in_name.substr( - 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); - std::string new_name = prefix + kZeroVarSuffix; - desc->Rename(in_name, new_name); - std::unique_ptr fill_zeros_op( - new OpDesc("fill_zeros_like", {{"X", {prefix}}}, - {{"Out", {new_name}}}, AttributeMap{})); - pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); - } - } - } - - for (auto& p : pending_fill_zeros_ops) { - grad_op_descs.insert(grad_op_descs.begin(), std::move(p)); - } - return grad_op_descs; -} - -static BlockDesc* CreateStepBlock( - ProgramDesc& program_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - int step_block_idx); - -std::vector> MakeBlockBackward( - ProgramDesc& program_desc, int block_idx, - std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var) { - VLOG(5) << "MakeBlockBackward"; - BlockDesc* cur_block = program_desc.MutableBlock(block_idx); - std::vector op_descs = cur_block->AllOps(); - std::unordered_map> dup_out_ops; - size_t grad_desc_idx = 0; - std::vector> backward_descs; - - for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { - VLOG(5) << "Making backward " << (*it)->Type() << " op"; - std::vector> op_grads; - - if ((*it)->Type() == "recurrent" || (*it)->Type() == "while" || - (*it)->Type() == "parallel_do") { - int step_block_idx = (*it)->GetBlockAttr("sub_block"); - BlockDesc* backward_block = CreateStepBlock(program_desc, no_grad_vars, - grad_to_var, step_block_idx); - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); - } else if ((*it)->Type() == "conditional_block") { - BlockDesc* backward_block = - CreateStepBlock(program_desc, no_grad_vars, grad_to_var, - (*it)->GetBlockAttr("sub_block")); - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); - } else { - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var); - } - - if (VLOG_IS_ON(10)) { - std::ostringstream sout; - sout << "Made "; - for (auto& op_grad : op_grads) { - sout << op_grad->Type() << " "; - } - VLOG(10) << sout.str(); - } - - for (const auto& desc : op_grads) { - for (const std::string& 
out_name : desc->OutputArgumentNames()) { - if (out_name.find("@GRAD") == std::string::npos) { - // Not all outputs of a backward operator is a gradient. Only gradient - // need to be sum. Skip variables are not gradient. - continue; - } - dup_out_ops[out_name].emplace_back(grad_desc_idx); - } - ++grad_desc_idx; - } - std::transform(op_grads.begin(), op_grads.end(), - std::back_inserter(backward_descs), - [](std::unique_ptr& ptr) { return std::move(ptr); }); - } - - VLOG(5) << "Appending Sums"; - // Check whether some variables are written more than once - std::list>> pending_sum_ops; - for (const auto& dup : dup_out_ops) { - const std::string& out_name = dup.first; - const std::vector dup_op = dup.second; - if (out_name != kEmptyVarName && dup_op.size() > 1) { - std::vector sum_op_inputs; - std::string next_g_name = out_name; - for (size_t i = 0; i < dup_op.size(); ++i) { - VLOG(10) << backward_descs[dup_op[i]]->Type() << " has " << out_name - << " duplicated"; - std::string new_name = out_name + "@RENAME@" + std::to_string(i); - backward_descs[dup_op[i]]->RenameOutput(out_name, new_name); - backward_descs[dup_op[i]]->RenameInput(out_name, next_g_name); - sum_op_inputs.emplace_back(new_name); - next_g_name = sum_op_inputs.back(); - } - std::unique_ptr sum_op(new OpDesc("sum", {{"X", sum_op_inputs}}, - {{"Out", {out_name}}}, - AttributeMap{})); - pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); - } - } - - pending_sum_ops.sort([](const std::pair>& a, - const std::pair>& b) { - return a.first > b.first; - }); - for (auto& p : pending_sum_ops) { - backward_descs.insert(backward_descs.begin() + p.first + 1, - std::move(p.second)); - } - - VLOG(5) << "MakeBlockBackward Finished"; - - return backward_descs; -} - -static BlockDesc* CreateStepBlock( - ProgramDesc& program_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - int step_block_idx) { - auto backward_block_op_descs = MakeBlockBackward(program_desc, step_block_idx, - no_grad_vars, grad_to_var); - BlockDesc* backward_block = - program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx)); - for (auto& ptr : backward_block_op_descs) { - backward_block->AppendAllocatedOp(move(ptr)); - } - return backward_block; -} - -ParamGradInfoMap AppendBackward( - ProgramDesc& program_desc, const VarDesc& target, - const std::unordered_set& no_grad_vars) { - std::unordered_set no_grad_var_names; - no_grad_var_names.reserve(no_grad_vars.size() + 1); - no_grad_var_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); - for (auto& name : no_grad_vars) { - no_grad_var_names.insert(GradVarName(name)); - } - - const int root_block_idx = 0; - auto root_block = program_desc.MutableBlock(root_block_idx); - - std::string fill_one_op_out = GradVarName(target.Name()); - bool is_scalar = target.GetShape() == std::vector{1}; - PADDLE_ENFORCE(is_scalar, "target should be scalar"); - VLOG(3) << "backward from loss=" << target.Name() - << " data_type=" << target.GetDataType(); - std::unique_ptr fill_one_op( - new OpDesc("fill_constant", {}, {{"Out", {fill_one_op_out}}}, - {{"shape", std::vector{1}}, - {"value", static_cast(1.0)}, - {"dtype", target.GetDataType()}})); - // infer var type of fill_one_op - fill_one_op->InferVarType(root_block); - - root_block->AppendAllocatedOp(std::move(fill_one_op)); - size_t forward_op_num = root_block->OpSize(); - size_t forward_block_num = program_desc.Size(); - - // Insert backward operators - std::unordered_map grad_to_var; - auto backward_op_descs = MakeBlockBackward(program_desc, 
root_block_idx, - &no_grad_var_names, &grad_to_var); - - for (auto& ptr : backward_op_descs) { - root_block->AppendAllocatedOp(std::move(ptr)); - } - // Create Variable - - // Create target gradient variable - std::unordered_map retv; - - auto var = root_block->Var(fill_one_op_out); - var->SetDataType(target.GetDataType()); - var->SetShape(target.GetShape()); - auto& target_grad = retv[target.Name()]; - target_grad.name_ = fill_one_op_out; - target_grad.block_idx_ = root_block_idx; - target_grad.op_idx_ = static_cast(forward_op_num); - - // create grad_var for all blocks in this program - CreateGradVarInBlock(forward_op_num, grad_to_var, root_block, &retv); - for (size_t block_index = forward_block_num; - block_index < program_desc.Size(); ++block_index) { - CreateGradVarInBlock(0, grad_to_var, program_desc.MutableBlock(block_index), - &retv); - } - return retv; -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/backward.h b/paddle/fluid/framework/backward.h deleted file mode 100644 index 3a971090c25c85efbf976532c364371baba9a870..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward.h +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include - -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/program_desc.h" - -namespace paddle { -namespace framework { - -// Create the backward operator from a forward operator. -// TODO(yuyang18): Add more API reference comment. -extern std::unique_ptr Backward( - const OperatorBase& forwardOp, - const std::unordered_set& no_grad_vars); - -struct GradVarInfo { - GradVarInfo() {} - GradVarInfo(const std::string& name, int block_idx, int op_idx) - : name_(name), block_idx_(block_idx), op_idx_(op_idx) {} - - bool operator==(const GradVarInfo& b) const { - return name_ == b.name_ && block_idx_ == b.block_idx_ && - op_idx_ == b.op_idx_; - } - - std::string name_; - int block_idx_; - int op_idx_; -}; - -using ParamGradInfoMap = std::unordered_map; - -ParamGradInfoMap AppendBackward( - ProgramDesc& program_desc, const VarDesc& target, - const std::unordered_set& no_grad_vars); - -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/backward_test.cc b/paddle/fluid/framework/backward_test.cc deleted file mode 100644 index cc1f871360ed3f7071364dbb0f932bfd997cadb0..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward_test.cc +++ /dev/null @@ -1,918 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/framework/backward.h" - -#include -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_desc.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/operators/net_op.h" - -USE_NO_KERNEL_OP(fill_constant); - -namespace paddle { -namespace framework { - -using DeviceContext = platform::DeviceContext; - -class NoneOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext *ctx) const override {} -}; - -template -class NoneKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &context) const override {} -}; - -class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { - public: - RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add"); - AddInput("b", "Bias of Add"); - AddOutput("Out", "Out of Add"); - AddComment("Add Op"); - } -}; - -class RowWiseAddGradMaker : public SingleGradOpDescMaker { - public: - using SingleGradOpDescMaker::SingleGradOpDescMaker; - - protected: - std::unique_ptr Apply() const override { - auto grad_op = new OpDesc(); - grad_op->SetInput(GradVarName("Out"), OutputGrad("Out")); - grad_op->SetOutput(GradVarName("X"), InputGrad("X")); - grad_op->SetOutput(GradVarName("b"), InputGrad("b")); - grad_op->SetType("rowwise_add_grad"); - return std::unique_ptr(grad_op); - } -}; - -class MulOpMaker : public OpProtoAndCheckerMaker { - public: - MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "A"); - AddInput("Y", "B"); - AddOutput("Out", "Out"); - AddAttr("x_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); - AddAttr("y_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); - AddComment("Mul"); - } -}; - -class SigmoidOpMaker : public OpProtoAndCheckerMaker { - public: - SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "X"); - AddOutput("Out", "Y"); - AddComment("Sigmoid"); - } -}; - -class NoGradOpMaker : public OpProtoAndCheckerMaker { - public: - NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "X input"); - AddOutput("Out", "Y output"); - AddComment("NoGradOp, same input output. 
no Grad"); - } -}; - -class FcOp : public operators::NetOp { - public: - FcOp(const std::string &type, const VariableNameMap &inputs, - const VariableNameMap &outputs, const AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - AppendOp(OpRegistry::CreateOp( - "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, - {{"Out", {Output("mul_result")}}}, AttributeMap{})); - auto input_b = Inputs("b"); - std::string before_act = "mul_result"; - if (input_b.size() != 0) { - AppendOp(OpRegistry::CreateOp( - "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {input_b[0]}}}, - {{"Out", {Output("add_result")}}}, AttributeMap{})); - before_act = "add_result"; - } else { - auto out_varname = Output("add_result"); - if (out_varname != kEmptyVarName) { - this->Rename(out_varname, kEmptyVarName); - } - } - - AppendOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}}, - {{"Out", {Output("Out")}}}, AttributeMap{})); - CompleteAddOp(false); - } -}; - -class FcOpMaker : public OpProtoAndCheckerMaker { - public: - FcOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddInput("W", "w"); - AddInput("b", "b"); - AddOutput("mul_result", "").AsIntermediate(); - AddOutput("add_result", "").AsIntermediate(); - AddOutput("Out", ""); - AddComment(""); - } -}; - -class ManyOutputOpMaker : public OpProtoAndCheckerMaker { - public: - ManyOutputOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("x", "x"); - AddOutput("y", "y"); - AddOutput("z", "z"); - AddComment(""); - } -}; - -class FillZeroOpMaker : public OpProtoAndCheckerMaker { - public: - FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddOutput("Out", "out"); - AddComment(""); - } -}; - -class SumOpMaker : public framework::OpProtoAndCheckerMaker { - public: - SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.").AsDuplicable(); - AddOutput("Out", "the output tensor of sum operator."); - AddComment(""); - } -}; - -class MultInOutOpMaker : public OpProtoAndCheckerMaker { - public: - MultInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddInput("H", "h"); - AddOutput("Y", "y"); - AddOutput("Z", "z"); - AddComment(""); - } -}; - -class MinusGradOpDescMaker : public GradOpDescMakerBase { - public: - using GradOpDescMakerBase::GradOpDescMakerBase; - - std::vector> operator()() const override { - std::vector> retv; - auto x_g = InputGrad("X"); - if (!x_g.empty()) { - auto *op_desc = new OpDesc(); - op_desc->SetType("scale"); - op_desc->SetInput("X", OutputGrad("Out")); - op_desc->SetOutput("Out", x_g); - op_desc->SetAttr("scale", 1.0f); - retv.emplace_back(op_desc); - } - - auto y_g = InputGrad("Y"); - if (!y_g.empty()) { - auto *op_desc = new OpDesc(); - op_desc->SetType("scale"); - op_desc->SetInput("X", OutputGrad("Out")); - op_desc->SetOutput("Out", y_g); - op_desc->SetAttr("scale", -1.0f); - retv.emplace_back(op_desc); - } - return retv; - } -}; - -class MinusOpMaker : public OpProtoAndCheckerMaker { - public: - MinusOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", ""); - AddInput("Y", ""); - AddOutput("Out", ""); - AddComment("minus for unittest"); - } -}; -} // namespace framework -} // 
namespace paddle - -namespace f = paddle::framework; -namespace ops = paddle::operators; -using EnforceNotMet = paddle::platform::EnforceNotMet; -// rowwise_add -REGISTER_OPERATOR(rowwise_add, f::NoneOp, f::RowWiseAddOpMaker, - f::RowWiseAddGradMaker); -REGISTER_OP_CPU_KERNEL(rowwise_add, - f::NoneKernel); -REGISTER_OPERATOR(rowwise_add_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(rowwise_add_grad, - f::NoneKernel); -// mul -REGISTER_OP(mul, f::NoneOp, f::MulOpMaker, mul_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(mul, f::NoneKernel); -REGISTER_OP_CPU_KERNEL(mul_grad, - f::NoneKernel); -// sigmoid -REGISTER_OP(sigmoid, f::NoneOp, f::SigmoidOpMaker, sigmoid_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(sigmoid, - f::NoneKernel); -REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NoneOp, f::NoGradOpMaker); -// fill_zeros_like -REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NoneOp, f::FillZeroOpMaker); -REGISTER_OP_CPU_KERNEL(fill_zeros_like, - f::NoneKernel); -// sum -REGISTER_OP(sum, f::NoneOp, f::SumOpMaker, sum_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(sum, f::NoneKernel); -REGISTER_OP_CPU_KERNEL(sum_grad, - f::NoneKernel); -// fc -REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); -// many_output_op -REGISTER_OP(many_output_op, f::NoneOp, f::ManyOutputOpMaker, - many_output_op_grad, f::NoneOp); -// mult_in_out -REGISTER_OP(mult_in_out, f::NoneOp, f::MultInOutOpMaker, mult_in_out_grad, - f::NoneOp); -REGISTER_OP_CPU_KERNEL(mult_in_out, - f::NoneKernel); -REGISTER_OP_CPU_KERNEL(mult_in_out_grad, - f::NoneKernel); -// minus -REGISTER_OPERATOR(minus, f::NoneOp, f::MinusOpMaker, f::MinusGradOpDescMaker); -REGISTER_OP_CPU_KERNEL(minus, f::NoneKernel); -// scale -REGISTER_OPERATOR(scale, f::NoneOp); -REGISTER_OP_CPU_KERNEL(scale, f::NoneKernel); - -TEST(Backward, simple_op_not_need_grad) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - auto gop = f::Backward(*fwd, {"x"}); - ASSERT_EQ(gop->Output(f::GradVarName("X")), f::kEmptyVarName); - - auto no_input_gop = f::Backward(*fwd, {"x", "b"}); - ASSERT_NE(no_input_gop, nullptr); - ASSERT_TRUE(no_input_gop->IsNetOp()); - ASSERT_EQ(0UL, static_cast(no_input_gop.get())->ops_.size()); -} - -TEST(Backward, net_fc_backward_normal) { - std::shared_ptr fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}}, - {{"mul_result", {"mul_res"}}, - {"add_result", {"add_re"}}, - {"Out", {"out"}}}, - f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = - f::Backward(*fwd, std::unordered_set{}); - ASSERT_TRUE(gop->IsNetOp()); - auto net = static_cast(gop.get()); - - ASSERT_NO_THROW(net->DebugString()); - - ASSERT_EQ(3UL, net->ops_.size()); - - f::OperatorBase &d_sigmoid = *net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); - - f::OperatorBase &d_add = *net->ops_[1]; - ASSERT_EQ("rowwise_add_grad", d_add.Type()); - - f::OperatorBase &d_mul = *net->ops_[2]; - ASSERT_EQ("mul_grad", d_mul.Type()); -} - -TEST(Backward, net_fc_backward_not_have_b) { - std::shared_ptr fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {}}}, - {{"mul_result", {"mul_res"}}, - {"add_result", {"add_res"}}, - {"Out", {"tmp"}}}, - f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = - f::Backward(*fwd, std::unordered_set{}); - ASSERT_TRUE(gop->IsNetOp()); - auto net = static_cast(gop.get()); - - ASSERT_NO_THROW(net->DebugString()); - - ASSERT_EQ(2UL, net->ops_.size()); - - f::OperatorBase &d_sigmoid = 
*net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); - - f::OperatorBase &d_mul = *net->ops_[1]; - ASSERT_EQ("mul_grad", d_mul.Type()); -} - -TEST(Backward, net_input_of_network_not_need_grad) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"x"}}, {"W", {"W1"}}, {"b", {"b1"}}}, - {{"mul_result", {"mul_tmp_0"}}, - {"add_result", {"add_tmp_0"}}, - {"Out", {"hidden0"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"hidden0"}}, {"W", {"W2"}}, {"b", {"b2"}}}, - {{"mul_result", {"mul_tmp_1"}}, - {"add_result", {"add_tmp_1"}}, - {"Out", {"hidden1"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - auto bwd = Backward(net, {"x"}); // x@GRAD is not need. - ASSERT_TRUE(bwd->IsNetOp()); - auto bwd_net = static_cast(bwd.get()); - - auto output_vars = bwd_net->OutputVars(true); - std::unordered_set all_outputs = - std::unordered_set(output_vars.begin(), output_vars.end()); - all_outputs.erase(f::kEmptyVarName); - - for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { - ASSERT_NE(all_outputs.find(f::GradVarName(out)), all_outputs.end()); - } - - // Not Generated X - ASSERT_EQ(all_outputs.find(f::GradVarName("X")), all_outputs.end()); - - ASSERT_EQ(2UL, bwd_net->ops_.size()); - ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); - auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); - ASSERT_EQ(3UL, first_fc_grad->ops_.size()); - ASSERT_EQ(f::kEmptyVarName, - first_fc_grad->ops_[2]->Output(f::GradVarName("X"))); -} - -TEST(Backward, net_shared_weight) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}}, - {{"Out", {"out"}}}, f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"out"}}, {"Y", {"w"}}}, - {{"Out", {"FinalOut"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - - auto bwd = f::Backward(net, std::unordered_set{}); - ASSERT_TRUE(bwd->IsNetOp()); - auto bwd_net = static_cast(bwd.get()); - ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("sum", bwd_net->ops_[2]->Type()); -} - -TEST(Backward, op_all_input_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"x", "b"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_TRUE(net->ops_.empty()); -} - -TEST(Backward, op_all_output_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"out"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_TRUE(net->ops_.empty()); -} - -TEST(Backward, op_part_of_output_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}}, - {{"y", {"Y"}}, {"z", {"Z"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"Z"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 2UL); - - auto &fill_zero = *net->ops_[0]; - ASSERT_EQ("fill_zeros_like", fill_zero.Type()); - ASSERT_EQ(1UL, fill_zero.Inputs("X").size()); - ASSERT_EQ("Z", fill_zero.Input("X")); - ASSERT_EQ(1UL, fill_zero.Outputs("Out").size()); - ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Out")); - - auto &d_many_out = *net->ops_[1]; - ASSERT_EQ("many_output_op_grad", d_many_out.Type()); - ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.Inputs().size()); // I/O/OG - ASSERT_EQ(std::string("Z") + 
f::kZeroVarSuffix, - d_many_out.Input(f::GradVarName("z"))); - ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y"))); - ASSERT_EQ(f::GradVarName("X"), d_many_out.Output(f::GradVarName("x"))); -} - -TEST(Backward, op_part_of_input_are_not_need) { - auto fwd = f::OpRegistry::CreateOp("mul", {{"X", {"a"}}, {"Y", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"a"}); - auto &grad_mul = *backward; - ASSERT_EQ(grad_mul.Type(), "mul_grad"); - ASSERT_EQ(grad_mul.Inputs().size(), 2UL + 1UL + 1UL); - ASSERT_EQ(grad_mul.Outputs().size(), 2UL); - ASSERT_EQ(grad_mul.Output(f::GradVarName("X")), f::kEmptyVarName); - ASSERT_EQ(grad_mul.Output(f::GradVarName("Y")), f::GradVarName("b")); - ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out")); - ASSERT_EQ(grad_mul.Input("X"), "a"); - ASSERT_EQ(grad_mul.Input("Y"), "b"); - ASSERT_EQ(grad_mul.Input("Out"), "out"); -} - -TEST(Backward, linear_net_intermediate_variable_has_no_grad) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"x1"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"mul_result", {"mul_out1"}}, - {"add_result", {"add_out1"}}, - {"Out", {"out1"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"out1"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"mul_result", {"mul_out2"}}, - {"add_result", {"tmp_out2"}}, - {"Out", {"out2"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"out2"}}, {"W", {"w3"}}, {"b", {"b3"}}}, - {{"mul_result", {"mul_out3"}}, - {"add_result", {"tmp_out3"}}, - {"Out", {"out3"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - - auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); - ASSERT_TRUE(backward->IsNetOp()); - auto bwd_net = static_cast(backward.get()); - ASSERT_EQ(bwd_net->ops_.size(), 3UL); - auto &grad_fc = *bwd_net->ops_[0]; - - const char *all = paddle::operators::NetOp::kAll; - EXPECT_EQ(grad_fc.Inputs(all).size(), - 2UL /* external input number */ - + 1UL /* external output number*/ - + 1UL /* number of gradient of external output*/ - + 2UL /* internal variable number*/ - ); - EXPECT_EQ(grad_fc.Outputs(all).size(), - 2UL /* input number of mul*/ - + 2UL /* input number of rowwise_add*/ - + 1UL /* input number of sigmod */ - - 1UL /* out2 is not needed*/); - EXPECT_EQ(bwd_net->ops_[1]->Inputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[1]->Outputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->Inputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL); -} - -TEST(Backward, simple_single_op) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - - f::OpDesc *op = block->AppendOp(); - op->SetType("rowwise_add"); - op->SetInput("X", {"x"}); - op->SetInput("b", {"b"}); - op->SetOutput("Out", {"out"}); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 3UL); - f::OpDesc *fill_op = block->AllOps()[1]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op = block->AllOps()[2]; - EXPECT_EQ(grad_op->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op->InputNames().size(), 1UL); - ASSERT_EQ(grad_op->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("b")), - 
std::vector({f::GradVarName("b")})); - - EXPECT_EQ(var_to_grad.size(), 3UL); - EXPECT_EQ(var_to_grad.at("b"), f::GradVarInfo(f::GradVarName("b"), 0, 2)); - EXPECT_EQ(var_to_grad.at("x"), f::GradVarInfo(f::GradVarName("x"), 0, 2)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("b"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x"))); -} - -TEST(Backward, default_attribute) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op = block->AppendOp(); - op->SetType("mul"); - op->SetInput("X", {"x"}); - op->SetInput("Y", {"y"}); - op->SetOutput("Out", {"out"}); - op->CheckAttrs(); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 3UL); - EXPECT_EQ(boost::get(op->GetAttr("x_num_col_dims")), 1); - EXPECT_EQ(boost::get(op->GetAttr("y_num_col_dims")), 1); - - f::OpDesc *fill_op = block->AllOps()[1]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op = block->AllOps()[2]; - ASSERT_EQ(grad_op->Type(), "mul_grad"); - EXPECT_EQ(boost::get(grad_op->GetAttr("x_num_col_dims")), 1); - EXPECT_EQ(boost::get(grad_op->GetAttr("y_num_col_dims")), 1); -} - -TEST(Backward, simple_mult_op) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"out1"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out2"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - auto target = f::VarDesc("out3"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 6UL + 1); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op1 = block->AllOps()[6]; - EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - f::OpDesc *grad_op2 = block->AllOps()[5]; - EXPECT_EQ(grad_op2->Type(), "mul_grad"); - ASSERT_EQ(grad_op2->InputNames().size(), 4UL); - ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op2->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op2->Input("Out"), std::vector({"out2"})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - - f::OpDesc *grad_op3 = block->AllOps()[4]; - EXPECT_EQ(grad_op3->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op3->InputNames().size(), 1UL); - ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out3")})); - 
EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b3")})); - - EXPECT_EQ(var_to_grad.size(), 7UL); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 5)); - EXPECT_EQ(var_to_grad.at("y2"), f::GradVarInfo(f::GradVarName("y2"), 0, 5)); - EXPECT_EQ(var_to_grad.at("out2"), - f::GradVarInfo(f::GradVarName("out2"), 0, 4)); - EXPECT_EQ(var_to_grad.at("b3"), f::GradVarInfo(f::GradVarName("b3"), 0, 4)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("y2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b3"))); -} - -TEST(Backward, intermedia_var_no_grad) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"x2"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out2"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - f::OpDesc *op4 = block->AppendOp(); - op4->SetType("mul"); - op4->SetInput("X", {"out1"}); - op4->SetInput("Y", {"out3"}); - op4->SetOutput("Out", {"out4"}); - - auto target = f::VarDesc("out4"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"out3"}); - - ASSERT_EQ(block->AllOps().size(), 7UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op1 = block->AllOps()[6]; - EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - f::OpDesc *grad_op4 = block->AllOps()[5]; - EXPECT_EQ(grad_op4->Type(), "mul_grad"); - ASSERT_EQ(grad_op4->InputNames().size(), 4UL); - ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op4->Input("Y"), std::vector({"out3"})); - EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out4"})); - EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out4")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), std::vector()); - - EXPECT_EQ(var_to_grad.size(), 4UL); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 5)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - 
EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); -} - -TEST(Backward, var_no_grad) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("mult_in_out"); - op1->SetInput("X", {"x1"}); - op1->SetInput("H", {"h1"}); - op1->SetOutput("Y", {"y1"}); - op1->SetOutput("Z", {"z1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mult_in_out"); - op2->SetInput("X", {"y1"}); - op2->SetInput("H", {"z1"}); - op2->SetOutput("Y", {"y2"}); - op2->SetOutput("Z", {"z2"}); - - auto target = f::VarDesc("z2"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"z1"}); - - ASSERT_EQ(block->AllOps().size(), 6UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op2 = block->AllOps()[3]; - ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); - ASSERT_EQ(grad_op2->InputNames().size(), 6UL); - ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op2->Input("X"), std::vector({"y1"})); - EXPECT_EQ(grad_op2->Input("H"), std::vector({"z1"})); - EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op2->Input("Z"), std::vector({"z2"})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Z")), - std::vector({f::GradVarName("z2")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), - std::vector({f::GradVarName("y1")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), std::vector()); - - f::OpDesc *fill_zero_op = block->AllOps()[4]; - ASSERT_EQ(fill_zero_op->Type(), "fill_zeros_like"); - ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); - ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); - EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); - EXPECT_EQ(fill_zero_op->Output("Out"), - std::vector({std::string("z1") + f::kZeroVarSuffix})); - - f::OpDesc *grad_op1 = block->AllOps()[5]; - ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 6UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input("X"), std::vector({"x1"})); - EXPECT_EQ(grad_op1->Input("H"), std::vector({"h1"})); - EXPECT_EQ(grad_op1->Input("Y"), std::vector({"y1"})); - EXPECT_EQ(grad_op1->Input("Z"), std::vector({"z1"})); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Y")), - std::vector({f::GradVarName("y1")})); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Z")), - std::vector({std::string("z1") + f::kZeroVarSuffix})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), - std::vector({f::GradVarName("h1")})); - - EXPECT_EQ(var_to_grad.size(), 4UL); - EXPECT_EQ(var_to_grad.at("y1"), f::GradVarInfo(f::GradVarName("y1"), 0, 3)); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 5)); - EXPECT_EQ(var_to_grad.at("h1"), f::GradVarInfo(f::GradVarName("h1"), 0, 5)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("y1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("h1"))); -} - -TEST(Backward, shared_var) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - 
f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"out1"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out1"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - auto target = f::VarDesc("out3"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 8UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op3 = block->AllOps()[4]; - ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op3->InputNames().size(), 1UL); - ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out3")})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1") + "@RENAME@0"})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b3")})); - - f::OpDesc *grad_op4 = block->AllOps()[5]; - ASSERT_EQ(grad_op4->Type(), "mul_grad"); - ASSERT_EQ(grad_op4->InputNames().size(), 4UL); - ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op4->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out2"})); - EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1") + "@RENAME@1"})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - - f::OpDesc *sum_op = block->AllOps()[6]; - ASSERT_EQ(sum_op->Type(), "sum"); - ASSERT_EQ(sum_op->InputNames().size(), 1UL); - ASSERT_EQ(sum_op->OutputNames().size(), 1UL); - EXPECT_EQ(sum_op->Input("X"), - std::vector({f::GradVarName("out1") + "@RENAME@0", - f::GradVarName("out1") + "@RENAME@1"})); - EXPECT_EQ(sum_op->Output("Out"), - std::vector({f::GradVarName("out1")})); - - f::OpDesc *grad_op1 = block->AllOps()[7]; - ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - EXPECT_EQ(var_to_grad.size(), 6UL); - EXPECT_EQ(var_to_grad.at("b3"), f::GradVarInfo(f::GradVarName("b3"), 0, 4)); - EXPECT_EQ(var_to_grad.at("y2"), f::GradVarInfo(f::GradVarName("y2"), 0, 5)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 7)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 7)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("b3"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("y2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); -} - -TEST(Backward, half_backward) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - auto *op1 = block->AppendOp(); - op1->SetType("minus"); - op1->SetInput("X", {"a"}); 
- op1->SetInput("Y", {"b"}); - op1->SetOutput("Out", {"out"}); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"b"}); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - auto ops = block->AllOps(); - ASSERT_EQ(3UL, ops.size()); - - EXPECT_EQ(var_to_grad.size(), 2UL); - EXPECT_EQ(var_to_grad.at("a"), - f::GradVarInfo(f::GradVarName("a"), 0, forward_len + 1)); -} diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 3693bc25d81a8309df1a6ddf3d9b08d484596ea9..b8847e4b909cbab67b2ddb6885b45b73d402de19 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -13,11 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/block_desc.h" +#include #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" -#include - namespace paddle { namespace framework { @@ -147,14 +146,6 @@ void BlockDesc::RemoveOp(size_t s, size_t e) { if (ops_.begin() + s == ops_.end() || ops_.begin() + e == ops_.end()) { return; } - need_update_ = true; - for (auto it = ops_.begin() + s; it != ops_.begin() + e; it++) { - auto names = (*it)->InputArgumentNames(); - for (auto n : names) { - // TODO(typhoonzero): delete vars if no other op use it. - VLOG(3) << "deleting var " << n; - } - } ops_.erase(ops_.begin() + s, ops_.begin() + e); } diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 185f018ac1b5863e0ee86fdaa17df1ccbc6e030e..eef19c4f09c60b9df18f154c85c421f5bff9413f 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include +#include #include #include @@ -89,8 +90,15 @@ class BlockDesc { OpDesc *InsertOp(size_t index); + /* + * Remove Op and its input/output variables. + * Note that for either input or output variable, if it is also an input or + * output variable of other ops, we should remain it. + */ void RemoveOp(size_t s, size_t e); + void RemoveVar(const std::string &name) { vars_.erase(name); } + std::vector AllOps() const; size_t OpSize() const { return ops_.size(); } diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 019bea600f496a6b58579ad0aa8af836cd6134a9..722bf8e8ecba0c9cbc5e3ad737dbf73148d2873c 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include // for size_t -#include +#include // for size_t +#include // NOLINT #include #include "paddle/fluid/platform/enforce.h" @@ -216,7 +216,8 @@ class ChannelHolder { template struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(size_t buffer_size) : type_(std::type_index(typeid(T))) { + explicit PlaceholderImpl(size_t buffer_size) + : type_(std::type_index(typeid(T))) { channel_.reset(MakeChannel(buffer_size)); } diff --git a/paddle/fluid/framework/channel_impl.h b/paddle/fluid/framework/channel_impl.h index 378a0bab1cc7408266fa45a0b3dc19619dd4fb4c..26d454534e1ae38c4f83376c0836a45781ea9101 100644 --- a/paddle/fluid/framework/channel_impl.h +++ b/paddle/fluid/framework/channel_impl.h @@ -15,7 +15,7 @@ limitations under the License. 
 */
 #pragma once
 #include <stddef.h>  // for size_t
 #include <atomic>
-#include <condition_variable>
+#include <condition_variable>  // NOLINT
 #include <deque>
 #include "paddle/fluid/framework/channel.h"
 #include "paddle/fluid/platform/enforce.h"
 
@@ -38,7 +38,7 @@ class ChannelImpl : public paddle::framework::Channel<T> {
   virtual void Unlock();
   virtual bool IsClosed();
   virtual void Close();
-  ChannelImpl(size_t);
+  explicit ChannelImpl(size_t);
   virtual ~ChannelImpl();
 
   virtual void AddToSendQ(const void *referrer, T *data,
@@ -60,7 +60,7 @@ class ChannelImpl : public paddle::framework::Channel<T> {
     const void *referrer;  // TODO(thuan): figure out better way to do this
     std::function<bool(ChannelAction)> callback;
 
-    QueueMessage(T *item)
+    explicit QueueMessage(T *item)
         : data(item), cond(std::make_shared<std::condition_variable_any>()) {}
 
     QueueMessage(T *item, std::shared_ptr<std::condition_variable_any> cond)
@@ -87,6 +87,21 @@ class ChannelImpl : public paddle::framework::Channel<T> {
     return value;
   }
 
+  std::shared_ptr<QueueMessage> get_first_message(
+      std::deque<std::shared_ptr<QueueMessage>> *queue, ChannelAction action) {
+    while (!queue->empty()) {
+      // Check whether this message was added by Select
+      // If this was added by Select then execute the callback
+      // to check if you can execute this message. The callback
+      // can return false if some other case was executed in Select.
+      // In that case just discard this QueueMessage and process next.
+      std::shared_ptr<QueueMessage> m = queue->front();
+      queue->pop_front();
+      if (m->callback == nullptr || m->callback(action)) return m;
+    }
+    return nullptr;
+  }
+
   size_t cap_;
   std::recursive_mutex mu_;
   bool closed_;
@@ -123,44 +138,27 @@ void ChannelImpl<T>::Send(T *item) {
 
   // If channel is closed, throw exception
   if (closed_) {
-    lock.unlock();
     send_return();
+    lock.unlock();
     PADDLE_THROW("Cannot send on closed channel");
   }
 
   // If there is a receiver, directly pass the value we want
   // to send to the receiver, bypassing the channel buffer if any
   if (!recvq.empty()) {
-    std::shared_ptr<QueueMessage> m = recvq.front();
-    recvq.pop_front();
-    // Do the data transfer
-    // We will do this data transfer if either of the following
-    // cases are true
-    // 1. callback == nullptr // This means it was a regular channel send
-    // 2. callback returns true
-    bool do_send = true;
-    if (m->callback != nullptr) do_send = m->callback(ChannelAction::SEND);
-    if (do_send)
+    std::shared_ptr<QueueMessage> m =
+        get_first_message(&recvq, ChannelAction::SEND);
+
+    if (m != nullptr) {
       *(m->data) = std::move(*item);
-    else {
-      // We cannot do the data transfer because
-      // this QueueMessage was added by Select
-      // and some other case was executed.
-      // So call the Send function again.
-      // We do not care about notifying other
-      // because they would have been notified
-      // by the executed select case.
-      lock.unlock();
+      m->Notify();
+      send_return();
+      return;
+    } else {
       Send(item);
       send_return();
       return;
     }
-
-    // Wake up the blocked process and unlock
-    m->Notify();
-    lock.unlock();
-    send_return();
-    return;
-  }
 
   // Unbuffered channel will always bypass this
@@ -169,8 +167,6 @@ void ChannelImpl<T>::Send(T *item) {
   if (buf_.size() < cap_) {
     // Copy to buffer
     buf_.push_back(std::move(*item));
-    // Release lock and return true
-    lock.unlock();
     send_return();
     return;
   }
@@ -181,8 +177,8 @@ void ChannelImpl<T>::Send(T *item) {
   sendq.push_back(m);
   m->Wait(lock);
   if (m->chan_closed) {
-    lock.unlock();
     send_return();
+    lock.unlock();
     PADDLE_THROW("Cannot send on closed channel");
   }
   send_return();
@@ -195,39 +191,38 @@ bool ChannelImpl<T>::Receive(T *item) {
 
   // If channel is closed and buffer is empty or
   // channel is unbuffered
-  if (closed_ && buf_.empty()) {
-    lock.unlock();
-    return recv_return(false);
-  }
+  if (closed_ && buf_.empty()) return recv_return(false);
 
   // If there is a sender, directly receive the value we want
-  // from the sender, bypassing the channel buffer if any
+  // from the sender. In case of a buffered channel, read from
+  // buffer and move front of send queue to the buffer
   if (!sendq.empty()) {
-    std::shared_ptr<QueueMessage> m = sendq.front();
-    sendq.pop_front();
-    // Do the data transfer
-    // We will do this data transfer if either of the following
-    // cases are true
-    // 1. callback == nullptr // This means it was a regular channel send
-    // 2. callback returns true
-    bool do_receive = true;
-    if (m->callback != nullptr)
-      do_receive = m->callback(ChannelAction::RECEIVE);
-    if (do_receive)
-      *item = std::move(*(m->data));
-    else
-      // We cannot do the data transfer because
-      // this QueueMessage was added by Select
-      // and some other case was executed.
-      // So call the Receive function again.
-      // We do not care about notifying other
-      // because they would have been notified
-      // by the executed select case.
-      return recv_return(Receive(item));
-
-    // Wake up the blocked process and unlock
-    m->Notify();
-    lock.unlock();
+    std::shared_ptr<QueueMessage> m =
+        get_first_message(&sendq, ChannelAction::RECEIVE);
+    if (buf_.size() > 0) {
+      // Case 1 : Channel is Buffered
+      // Do Data transfer from front of buffer
+      // and add a QueueMessage to the buffer
+      *item = std::move(buf_.front());
+      buf_.pop_front();
+      // If first message from sendq is not null
+      // add it to the buffer and notify it
+      if (m != nullptr) {
+        // Copy to buffer
+        buf_.push_back(std::move(*(m->data)));
+        m->Notify();
+      }  // Ignore if there is no first message
+    } else {
+      // Case 2: Channel is Unbuffered
+      // Do data transfer from front of SendQ
+      // If front is nullptr, then recursively call itself
+      if (m != nullptr) {
+        *item = std::move(*(m->data));
+        m->Notify();
+      } else {
+        return recv_return(Receive(item));
+      }
+    }
     return recv_return(true);
   }
 
@@ -236,8 +231,7 @@ bool ChannelImpl<T>::Receive(T *item) {
   // Directly read from buffer
   *item = std::move(buf_.front());
   buf_.pop_front();
-  // Release lock and return true
-  lock.unlock();
+  // return true
   return recv_return(true);
 }
diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc
index e2380bb54bd25c4f30f79cad30f95f7cb056eef0..542d791f6bbdf7d68a4786998ccc0233fff6473d 100644
--- a/paddle/fluid/framework/channel_test.cc
+++ b/paddle/fluid/framework/channel_test.cc
@@ -14,8 +14,8 @@ limitations under the License.
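
/* A self-contained sketch of the queue-draining pattern that
 * get_first_message() introduces above: pop entries until one is either a
 * plain send/receive (no callback) or a Select-added entry whose callback
 * confirms the case may still fire. Types here are hypothetical stand-ins,
 * not the framework's own: */
#include <deque>
#include <functional>
#include <memory>
struct Msg {
  std::function<bool(int)> callback;  // null for plain sends/receives
};
inline std::shared_ptr<Msg> FirstRunnable(std::deque<std::shared_ptr<Msg>> *q,
                                          int action) {
  while (!q->empty()) {
    std::shared_ptr<Msg> m = q->front();
    q->pop_front();
    // A null callback is a regular operation; otherwise ask Select whether
    // this case is still eligible to run.
    if (m->callback == nullptr || m->callback(action)) return m;
  }
  return nullptr;  // every queued entry belonged to an already-fired Select
}
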
*/ #include "paddle/fluid/framework/channel.h" -#include -#include +#include // NOLINT +#include // NOLINT #include "gtest/gtest.h" using paddle::framework::Channel; @@ -36,23 +36,25 @@ TEST(Channel, ChannelCapacityTest) { delete ch; } -void RecevingOrderEqualToSendingOrder(Channel *ch) { +void RecevingOrderEqualToSendingOrder(Channel *ch, int num_items) { unsigned sum_send = 0; std::thread t([&]() { - for (int i = 0; i < 5; i++) { + for (int i = 0; i < num_items; i++) { ch->Send(&i); sum_send += i; } }); - for (int i = 0; i < 5; i++) { - int recv = 999; + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + for (int i = 0; i < num_items; i++) { + int recv = -1; EXPECT_EQ(ch->Receive(&recv), true); EXPECT_EQ(recv, i); } std::this_thread::sleep_for(std::chrono::milliseconds(200)); CloseChannel(ch); t.join(); - EXPECT_EQ(sum_send, 10U); + unsigned expected_sum = (num_items * (num_items - 1)) / 2; + EXPECT_EQ(sum_send, expected_sum); delete ch; } @@ -164,9 +166,9 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { std::thread t([&]() { // Try to write more than buffer size. for (size_t i = 0; i < 2 * buffer_size; ++i) { - if (i < buffer_size) + if (i < buffer_size) { ch->Send(&i); // should block after 10 iterations - else { + } else { bool is_exception = false; try { ch->Send(&i); @@ -185,21 +187,37 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { TEST(Channel, RecevingOrderEqualToSendingOrderWithUnBufferedChannel) { auto ch = MakeChannel(0); - RecevingOrderEqualToSendingOrder(ch); + RecevingOrderEqualToSendingOrder(ch, 20); +} + +TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel1) { + // Test that Receive Order is same as Send Order when number of items + // sent is less than size of buffer + auto ch = MakeChannel(10); + RecevingOrderEqualToSendingOrder(ch, 5); +} + +TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel2) { + // Test that Receive Order is same as Send Order when number of items + // sent is equal to size of buffer + auto ch = MakeChannel(10); + RecevingOrderEqualToSendingOrder(ch, 10); } -TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel) { +TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel3) { + // Test that Receive Order is same as Send Order when number of items + // sent is greater than the size of buffer auto ch = MakeChannel(10); - RecevingOrderEqualToSendingOrder(ch); + RecevingOrderEqualToSendingOrder(ch, 20); } void ChannelCloseUnblocksReceiversTest(Channel *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -212,7 +230,7 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } @@ -223,21 +241,21 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < 
kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -259,13 +277,13 @@ void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { if (isBuffered) { // If ch is Buffered, atleast 4 threads must be blocked. int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (!thread_ended[i]) ct++; } EXPECT_GE(ct, 4); } else { // If ch is UnBuffered, all the threads should be blocked. - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -276,21 +294,21 @@ void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } if (isBuffered) { // Verify that only 1 send was successful int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } // Only 1 send must be successful EXPECT_EQ(ct, 1); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that closing a buffered channel also unblocks @@ -391,13 +409,13 @@ TEST(Channel, UnbufferedMoreReceiveLessSendTest) { // This tests that destroying a channel unblocks // any senders waiting for channel to have write space void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -420,14 +438,14 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { if (isBuffered) { // If channel is buffered, verify that atleast 4 threads are blocked int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (thread_ended[i] == false) ct++; } // Atleast 4 threads must be blocked EXPECT_GE(ct, 4); } else { // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -436,13 +454,13 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; 
i++) { EXPECT_EQ(thread_ended[i], true); } // Count number of successful sends int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } @@ -455,18 +473,18 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { } // Join all threads - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that destroying a channel also unblocks // any receivers waiting on the channel void ChannelDestroyUnblockReceivers(Channel *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -480,18 +498,18 @@ void ChannelDestroyUnblockReceivers(Channel *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait // Verify that all threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } // delete the channel delete ch; std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(Channel, BufferedChannelDestroyUnblocksReceiversTest) { @@ -661,12 +679,12 @@ TEST(ChannelHolder, TypeMismatchReceiveTest) { } void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -679,7 +697,7 @@ void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } @@ -690,21 +708,21 @@ void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < 
kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -726,13 +744,13 @@ void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { if (isBuffered) { // If ch is Buffered, atleast 4 threads must be blocked. int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (!thread_ended[i]) ct++; } EXPECT_GE(ct, 4); } else { // If ch is UnBuffered, all the threads should be blocked. - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -743,21 +761,21 @@ void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } if (isBuffered) { // Verify that only 1 send was successful int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } // Only 1 send must be successful EXPECT_EQ(ct, 1); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that closing a channelholder unblocks @@ -795,13 +813,13 @@ TEST(Channel, ChannelHolderCloseUnblocksSendersTest) { // This tests that destroying a channelholder unblocks // any senders waiting for channel void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -823,14 +841,14 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { if (isBuffered) { // If channel is buffered, verify that atleast 4 threads are blocked int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (thread_ended[i] == false) ct++; } // Atleast 4 threads must be blocked EXPECT_GE(ct, 4); } else { // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -839,13 +857,13 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } // Count number of successfuld sends int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } @@ -858,18 +876,18 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { } // Join all threads - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that destroying a channelholder also unblocks // any receivers waiting on the channel void 
ChannelHolderDestroyUnblockReceivers(ChannelHolder *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -883,18 +901,18 @@ void ChannelHolderDestroyUnblockReceivers(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } // delete the channel delete ch; std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(ChannelHolder, ChannelHolderDestroyUnblocksReceiversTest) { @@ -927,12 +945,12 @@ TEST(ChannelHolder, ChannelHolderDestroyUnblocksSendersTest) { // This tests that closing a channelholder many times. void ChannelHolderManyTimesClose(ChannelHolder *ch) { - const int num_threads = 15; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const int kNumThreads = 15; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to send data to channel. - for (size_t i = 0; i < num_threads / 3; i++) { + for (size_t i = 0; i < kNumThreads / 3; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *ended) { @@ -944,7 +962,7 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { } // Launches threads that try to receive data to channel. - for (size_t i = num_threads / 3; i < 2 * num_threads / 3; i++) { + for (size_t i = kNumThreads / 3; i < 2 * kNumThreads / 3; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -958,7 +976,7 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { } // Launches threads that try to close the channel. 
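
/* The refactored RecevingOrderEqualToSendingOrder() above derives its
 * checksum from the item count instead of hard-coding 10U: sending 0..n-1
 * sums to n*(n-1)/2. A one-line sketch of that arithmetic: */
inline unsigned ExpectedChecksum(unsigned n) { return n * (n - 1) / 2; }
// e.g. ExpectedChecksum(5) == 10, the value the old test hard-coded.
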
- for (size_t i = 2 * num_threads / 3; i < num_threads; i++) { + for (size_t i = 2 * kNumThreads / 3; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -973,13 +991,13 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait // Verify that all threads are unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } EXPECT_TRUE(ch->IsClosed()); // delete the channel delete ch; - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(ChannelHolder, ChannelHolderManyTimesCloseTest) { diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index e896a06162527ed0289767901f4b4a33fcd2875f..a66525303da58601f85c40c41854edaf22c3d4ea 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -105,7 +105,7 @@ static void BuildVar(const std::string& param_name, TEST(Operator, CPUtoGPU) { using namespace paddle::framework; using namespace paddle::platform; - InitDevices(); + InitDevices(true); paddle::framework::Scope scope; paddle::platform::CPUPlace cpu_place; diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..897e41f79f4e3bb9cecbe7b42fc6c4fd3401d839 --- /dev/null +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -0,0 +1,30 @@ +cc_library(var_handle SRCS var_handle.cc DEPS place) +cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor) +cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) +cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) +nv_library(nccl_all_reduce_op_handle SRCS nccl_all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory + dynload_cuda) +cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry) +cc_library(send_op_handle SRCS send_op_handle.cc DEPS framework_proto scope place operator op_registry) + +cc_library(ssa_graph SRCS ssa_graph.cc DEPS var_handle op_handle_base) +cc_library(ssa_graph_builder SRCS ssa_graph_builder.cc DEPS ssa_graph) + +if(WITH_GPU) + set(multi_devices_graph_builder_deps nccl_all_reduce_op_handle) +else() + set(multi_devices_graph_builder_deps) +endif() +cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle + scale_loss_grad_op_handle send_op_handle ${multi_devices_graph_builder_deps}) +cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph framework_proto) +cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope + simple_threadpool device_context) + +cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory) +cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory) + +cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory + device_context broadcast_op_handle) +cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory + 
device_context gather_op_handle) diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..7d29012380e1b1710704d71a28d21dcc3097eb51 --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -0,0 +1,111 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/broadcast_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +Tensor *GetTensorFromVar(Variable *in_var) { + if (in_var->IsType()) { + return in_var->GetMutable(); + } else if (in_var->IsType()) { + return in_var->GetMutable()->mutable_value(); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows"); + } + return nullptr; +} + +BroadcastOpHandle::BroadcastOpHandle(const std::vector &local_scopes, + const std::vector &places) + : local_scopes_(local_scopes), places_(places) {} + +void BroadcastOpHandle::RunImpl() { + // the input may have dummy var. + std::vector in_var_handle; + for (auto *in : inputs_) { + auto *out_handle = dynamic_cast(in); + if (out_handle) { + in_var_handle.push_back(out_handle); + } + } + PADDLE_ENFORCE_EQ(in_var_handle.size(), 1, + "The number of input should be one."); + + // the output may have dummy var. 
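
/* A sketch of the dynamic_cast filtering used above: dependency edges mix
 * real variable handles with dummy handles that exist only to enforce
 * ordering, so typed work lists are built by down-casting and skipping
 * nulls. Base/derived names here are illustrative stand-ins: */
#include <vector>
struct VarBase { virtual ~VarBase() {} };
struct RealVar : VarBase {};
struct DummyVar : VarBase {};
inline std::vector<RealVar *> FilterReal(const std::vector<VarBase *> &ins) {
  std::vector<RealVar *> out;
  for (VarBase *v : ins) {
    if (auto *r = dynamic_cast<RealVar *>(v)) out.push_back(r);  // drop dummies
  }
  return out;
}
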
+ std::vector out_var_handles; + for (auto *out : outputs_) { + auto *out_handle = dynamic_cast(out); + if (out_handle) { + out_var_handles.push_back(out_handle); + } + } + + PADDLE_ENFORCE_EQ( + out_var_handles.size(), places_.size(), + "The number of output should equal to the number of places."); + + // Wait input done, this Wait is asynchronous operation + auto &in_place = in_var_handle[0]->place_; + if (in_var_handle[0]->generated_op_) { + for (auto *out : out_var_handles) { + auto &out_p = out->place_; + in_var_handle[0]->generated_op_->Wait(dev_ctxes_[out_p]); + } + } + + // + auto in_scope_idx = in_var_handle[0]->scope_idx_; + auto in_var = + local_scopes_.at(in_scope_idx)->FindVar(in_var_handle[0]->name_); + Tensor *in_tensor = GetTensorFromVar(in_var); + + for (auto *out : out_var_handles) { + auto &out_p = out->place_; + auto out_var = local_scopes_.at(out->scope_idx_)->FindVar(out->name_); + + PADDLE_ENFORCE_EQ(out_p.which(), in_place.which(), + "Places must be all on CPU or all on CUDA."); + + if (in_var->IsType()) { + auto &in_sr = in_var->Get(); + auto out_sr = out_var->GetMutable(); + if (&in_sr == out_sr) continue; + out_sr->set_height(in_sr.height()); + out_sr->set_rows(in_sr.rows()); + out_sr->mutable_value()->Resize(in_sr.value().dims()); + out_sr->mutable_value()->mutable_data(out_p, in_sr.value().type()); + } else if (in_var->IsType()) { + auto in_lod = in_var->Get(); + auto out_lod = out_var->GetMutable(); + if (&in_lod == out_lod) continue; + out_lod->set_lod(in_lod.lod()); + out_lod->Resize(in_lod.dims()); + out_lod->mutable_data(out_p, in_lod.type()); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows."); + } + + Tensor *out_tensor = GetTensorFromVar(out_var); + paddle::framework::TensorCopy(*in_tensor, out_p, *(dev_ctxes_[in_place]), + out_tensor); + } +} + +std::string BroadcastOpHandle::Name() const { return "broadcast"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..b3292422522b64a38a50f39f04e6f0d2e15492dd --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
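
/* The broadcast above is, in essence, "copy one source buffer to a
 * destination per participating place", after waiting for the producer.
 * A minimal host-side sketch of that fan-out, independent of the
 * framework's Tensor/Scope machinery: */
#include <vector>
inline void BroadcastCopies(const std::vector<float> &src,
                            std::vector<std::vector<float>> *dsts) {
  for (auto &dst : *dsts) dst = src;  // one copy per destination place
}
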
+ +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +struct BroadcastOpHandle : public OpHandleBase { + const std::vector &local_scopes_; + const std::vector &places_; + + BroadcastOpHandle(const std::vector &local_scopes, + const std::vector &places); + + std::string Name() const override; + + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..dfc52b012f8b6bf5cf1a3feab90dc1ec7842ad6c --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -0,0 +1,231 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/broadcast_op_handle.h" +#include "gtest/gtest.h" + +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +namespace f = paddle::framework; +namespace p = paddle::platform; + +// test data amount +const f::DDim kDims = {20, 20}; + +struct TestBroadcastOpHandle { + std::vector> ctxs_; + std::vector local_scopes_; + Scope g_scope_; + std::unique_ptr op_handle_; + std::vector> vars_; + std::vector gpu_list_; + + void WaitAll() { + for (size_t j = 0; j < ctxs_.size(); ++j) { + ctxs_[j]->Wait(); + } + } + + void InitCtxOnGpu(bool use_gpu) { + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + int count = p::GetCUDADeviceCount(); + if (count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " + "device count is " + << count; + exit(0); + } + for (int i = 0; i < count; ++i) { + auto p = p::CUDAPlace(i); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CUDADeviceContext(p)); + } +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { + int count = 8; + for (int i = 0; i < count; ++i) { + auto p = p::CPUPlace(); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CPUDeviceContext(p)); + } + } + } + + void InitBroadcastOp(size_t input_scope_idx) { + for (size_t j = 0; j < gpu_list_.size(); ++j) { + local_scopes_.push_back(&(g_scope_.NewScope())); + local_scopes_[j]->Var("out"); + } + local_scopes_[input_scope_idx]->Var("input"); + + op_handle_.reset(new BroadcastOpHandle(local_scopes_, gpu_list_)); + + vars_.emplace_back(new VarHandle()); + VarHandle* in_var_handle = static_cast(vars_.back().get()); + in_var_handle->place_ = gpu_list_[input_scope_idx]; + in_var_handle->name_ = "input"; + in_var_handle->version_ = 1; + 
in_var_handle->scope_idx_ = input_scope_idx; + in_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_var_handle); + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* dummy_var_handle = + static_cast(vars_.back().get()); + dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(dummy_var_handle); + + for (size_t j = 0; j < gpu_list_.size(); ++j) { + op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get(); + vars_.emplace_back(new VarHandle()); + VarHandle* out_var_handle = static_cast(vars_.back().get()); + out_var_handle->place_ = gpu_list_[j]; + out_var_handle->name_ = "out"; + out_var_handle->version_ = 2; + out_var_handle->scope_idx_ = j; + op_handle_->AddOutput(out_var_handle); + } + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* out_dummy_var_handle = + static_cast(vars_.back().get()); + out_dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddOutput(out_dummy_var_handle); + } + + void TestBroadcastLodTensor(size_t input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_lod_tensor = in_var->GetMutable(); + in_lod_tensor->mutable_data(kDims, gpu_list_[input_scope_idx]); + + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + f::LoD lod{{0, 10, 20}}; + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor); + in_lod_tensor->set_lod(lod); + + op_handle_->Run(false); + + WaitAll(); + + p::CPUPlace cpu_place; + for (size_t j = 0; j < gpu_list_.size(); ++j) { + auto out_var = local_scopes_[j]->Var("out"); + auto out_tensor = out_var->Get(); + PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); + + f::Tensor result_tensor; + f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor); + float* ct = result_tensor.mutable_data(cpu_place); + + for (int64_t i = 0; i < f::product(kDims); ++i) { + ASSERT_NEAR(ct[i], send_vector[i], 1e-5); + } + } + } + + void TestBroadcastSelectedRows(size_t input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_selected_rows = in_var->GetMutable(); + auto value = in_selected_rows->mutable_value(); + value->mutable_data(kDims, gpu_list_[input_scope_idx]); + int height = static_cast(kDims[0]) * 2; + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + in_selected_rows->set_height(height); + in_selected_rows->set_rows(rows); + + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), value); + + op_handle_->Run(false); + + WaitAll(); + + p::CPUPlace cpu_place; + for (size_t j = 0; j < gpu_list_.size(); ++j) { + auto out_var = local_scopes_[j]->Var("out"); + auto& out_select_rows = out_var->Get(); + auto rt = out_select_rows.value(); + + PADDLE_ENFORCE_EQ(out_select_rows.height(), height, + "height is not equal."); + for (size_t k = 0; k < out_select_rows.rows().size(); ++k) { + PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k]); + } + + f::Tensor result_tensor; + f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor); + float* ct = result_tensor.data(); + + for (int64_t i = 0; i < f::product(kDims); ++i) { + ASSERT_NEAR(ct[i], send_vector[i], 1e-5); + } + } + } +}; + +TEST(BroadcastTester, TestCPUBroadcastTestLodTensor) { + TestBroadcastOpHandle test_op; + size_t 
input_scope_idx = 0; + test_op.InitCtxOnGpu(false); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastLodTensor(input_scope_idx); +} + +TEST(BroadcastTester, TestCPUBroadcastTestSelectedRows) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(false); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastSelectedRows(input_scope_idx); +} + +#ifdef PADDLE_WITH_CUDA +TEST(BroadcastTester, TestGPUBroadcastTestLodTensor) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(true); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastLodTensor(input_scope_idx); +} + +TEST(BroadcastTester, TestGPUBroadcastTestSelectedRows) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(true); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastSelectedRows(input_scope_idx); +} +#endif + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..ff6d91c1dafb0ab4cabb1646cc333e19a89eb812 --- /dev/null +++ b/paddle/fluid/framework/details/computation_op_handle.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/computation_op_handle.h" + +#include + +namespace paddle { +namespace framework { +namespace details { +ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope, + platform::Place place) + : op_(framework::OpRegistry::CreateOp(op_desc)), + scope_(scope), + place_(place) {} + +void ComputationOpHandle::RunImpl() { + auto *cur_ctx = dev_ctxes_[place_]; + for (auto *in : inputs_) { + bool need_wait = + in->generated_op_ && in->generated_op_->dev_ctxes_[place_] != cur_ctx; + if (need_wait) { + in->generated_op_->Wait(cur_ctx); + } + } + + this->RunAndRecordEvent([this] { + op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get(), place_); + }); +} + +std::string ComputationOpHandle::Name() const { return op_->Type(); } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/computation_op_handle.h b/paddle/fluid/framework/details/computation_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..d6d2d731ca80a0fbc0a2a34027b5b7c3c1977c07 --- /dev/null +++ b/paddle/fluid/framework/details/computation_op_handle.h @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
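
/* The need_wait test in ComputationOpHandle::RunImpl() above synchronizes
 * only across device contexts: an input produced on the same context is
 * already ordered, while one produced elsewhere must be waited on. A
 * schematic of that predicate (the Ctx type is a hypothetical stand-in): */
struct Ctx {};
inline bool NeedWait(const Ctx *producer_ctx, const Ctx *consumer_ctx) {
  // Same stream/context implies program order; no explicit wait required.
  return producer_ctx != nullptr && producer_ctx != consumer_ctx;
}
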
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { +struct ComputationOpHandle : public OpHandleBase { + std::unique_ptr op_; + Scope *scope_; + platform::Place place_; + + ComputationOpHandle(const OpDesc &op_desc, Scope *scope, + platform::Place place); + + std::string Name() const override; + + protected: + void RunImpl() override; +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..e3e7c55d153aec8ce9c25c962821b266eaa84fe4 --- /dev/null +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -0,0 +1,85 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/fetch_op_handle.h" + +#include +#include + +namespace paddle { +namespace framework { +namespace details { + +FetchOpHandle::FetchOpHandle(FeedFetchList *data, size_t offset, + std::vector *local_scopes) + : data_(data), offset_(offset), local_scopes_(local_scopes) {} + +FetchOpHandle::~FetchOpHandle() { + for (auto *input_var : inputs_) { + input_var->pending_ops_.erase(this); + } +} + +void FetchOpHandle::Wait(platform::DeviceContext *waited_dev) { + PADDLE_THROW("Nobody should wait FetchOp. 
Unexpceted Error"); +} + +void FetchOpHandle::WaitAndMergeCPUTensors() const { + std::vector tensors_ptr; + tensors_ptr.reserve(tensors_.size()); + for (auto &t : tensors_) { + tensors_ptr.emplace_back(&t); + } + data_->at(offset_).MergeLoDTensor(tensors_ptr, platform::CPUPlace()); +} + +void FetchOpHandle::RunImpl() { + auto cpu_ctx = + platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); + for (auto *input : inputs_) { + auto *var = static_cast(input); + var->generated_op_->Wait(cpu_ctx); + } + + tensors_.resize(inputs_.size()); + auto *var = static_cast(inputs_[0]); + auto &var_name = var->name_; + platform::CPUPlace cpu; + auto &scopes = *local_scopes_; + + for (size_t i = 0; i < scopes.size(); ++i) { + auto &scope = scopes[i]; + auto &t = scope->FindVar(kLocalExecScopeName) + ->Get() + ->FindVar(var_name) + ->Get(); + if (platform::is_gpu_place(var->place_)) { +#ifdef PADDLE_WITH_CUDA + TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i]); + dev_ctxes_[t.place()]->Wait(); +#endif + } else { + tensors_[i].ShareDataWith(t); + tensors_[i].set_lod(t.lod()); + } + } + + this->WaitAndMergeCPUTensors(); +} + +std::string FetchOpHandle::Name() const { return "Fetch"; } + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/fetch_op_handle.h b/paddle/fluid/framework/details/fetch_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..904b2d669f8b156b99197afb0155380d1170a68b --- /dev/null +++ b/paddle/fluid/framework/details/fetch_op_handle.h @@ -0,0 +1,49 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +struct FetchOpHandle : public OpHandleBase { + FeedFetchList *data_; + size_t offset_; + std::vector *local_scopes_; + std::vector tensors_; + + FetchOpHandle(FeedFetchList *data, size_t offset, + std::vector *local_scopes); + + ~FetchOpHandle(); + + void Wait(platform::DeviceContext *waited_dev) override; + + void WaitAndMergeCPUTensors() const; + + std::string Name() const override; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..8dd85be567d33991ac003707fec939a61a2d0962 --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle.cc @@ -0,0 +1,126 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
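
/* FetchOpHandle::RunImpl() above gathers one tensor per device, copying
 * GPU-resident ones to host before MergeLoDTensor concatenates them. A
 * simplified host-only sketch of the merge step: */
#include <vector>
inline std::vector<float> MergeFetched(
    const std::vector<std::vector<float>> &per_device) {
  std::vector<float> merged;
  for (const auto &t : per_device)
    merged.insert(merged.end(), t.begin(), t.end());  // concatenate in order
  return merged;
}
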
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/gather_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +GatherOpHandle::GatherOpHandle(const std::vector &local_scopes, + const std::vector &places) + : local_scopes_(local_scopes), places_(places) {} + +void GatherOpHandle::RunImpl() { + // the input may have dummy var. + std::vector in_var_handles; + for (auto *in : inputs_) { + auto *in_handle = dynamic_cast(in); + if (in_handle) { + in_var_handles.push_back(in_handle); + } + } + PADDLE_ENFORCE_EQ( + in_var_handles.size(), places_.size(), + "The number of output should equal to the number of places."); + + // the output may have dummy var. + std::vector out_var_handles; + for (auto *out : outputs_) { + auto *out_handle = dynamic_cast(out); + if (out_handle) { + out_var_handles.push_back(out_handle); + } + } + PADDLE_ENFORCE_EQ(out_var_handles.size(), 1, + "The number of output should be one."); + + auto in_0_handle = static_cast(in_var_handles[0]); + auto pre_in_var = + local_scopes_[in_0_handle->scope_idx_]->FindVar(in_0_handle->name_); + auto pre_place = in_0_handle->place_; + + PADDLE_ENFORCE(pre_in_var->IsType(), + "Currently, gather_op only can gather SelectedRows."); + + PADDLE_ENFORCE_EQ(out_var_handles[0]->place_.which(), pre_place.which(), + "The place of input and output should be the same."); + + // Wait input done, this Wait is asynchronous operation + for (auto *in : in_var_handles) { + if (in->generated_op_) { + in->generated_op_->Wait(dev_ctxes_[in->place_]); + } + } + + std::vector out_rows; + std::vector in_tensors; + std::vector in_places; + + auto &pre_in = pre_in_var->Get(); + // gather the inputs + for (auto *in : in_var_handles) { + auto in_handle = static_cast(in); + auto in_p = in_handle->place_; + in_places.push_back(in_p); + PADDLE_ENFORCE_EQ(in_p.which(), pre_place.which(), + "Places must be all on CPU or all on CUDA."); + auto in_var = + local_scopes_.at(in_handle->scope_idx_)->FindVar(in_handle->name_); + auto &in_sr = in_var->Get(); + + PADDLE_ENFORCE_EQ(in_sr.value().type(), pre_in.value().type(), + "The type of input is not consistent."); + PADDLE_ENFORCE_EQ(pre_in.height(), in_sr.height(), + "The height of inputs is not consistent."); + PADDLE_ENFORCE_EQ(pre_in.GetCompleteDims(), in_sr.GetCompleteDims(), , + "The dims of inputs is not consistent."); + + auto in_sr_rows = in_sr.rows(); + out_rows.insert(out_rows.end(), in_sr_rows.begin(), in_sr_rows.end()); + + in_tensors.emplace_back(in_sr.value()); + } + + // write the output + auto &out_place = out_var_handles[0]->place_; + auto out_scope_idx = out_var_handles[0]->scope_idx_; + auto out_var = + local_scopes_[out_scope_idx]->FindVar(out_var_handles[0]->name_); + + auto out = out_var->GetMutable(); + out->set_height(pre_in.height()); + out->set_rows(out_rows); + size_t rows = out_rows.size(); + DDim out_dim = pre_in.GetCompleteDims(); + out_dim[0] = static_cast(rows); + out->mutable_value()->Resize(out_dim); + out->mutable_value()->mutable_data(out_place, pre_in.value().type()); + Tensor *out_tensor = out->mutable_value(); + + // copy + int s = 0, e = 0; + for 
(size_t j = 0; j < in_tensors.size(); ++j) { + e += in_tensors[j].dims()[0]; + auto sub_out = out_tensor->Slice(s, e); + paddle::framework::TensorCopy(in_tensors[j], out_place, + *(dev_ctxes_[in_places[j]]), &sub_out); + s = e; + } +} + +std::string GatherOpHandle::Name() const { return "gather"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle.h b/paddle/fluid/framework/details/gather_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0231f642c05e6b558b7e2518a15e08c816fe4b --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +struct GatherOpHandle : public OpHandleBase { + const std::vector &local_scopes_; + const std::vector &places_; + + GatherOpHandle(const std::vector &local_scopes, + const std::vector &places); + + std::string Name() const override; + + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..10839f239d59e97946575297a6d125968a1458f4 --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -0,0 +1,192 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
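
/* The copy loop above packs each input's rows into one output tensor using
 * running offsets: e advances by the input's row count, the slice [s, e) is
 * written, then s catches up. The same bookkeeping on plain vectors: */
#include <algorithm>
#include <vector>
inline std::vector<int> GatherRows(const std::vector<std::vector<int>> &ins) {
  std::vector<int> out;
  size_t s = 0, e = 0;
  for (const auto &in : ins) {
    e += in.size();  // end of this input's slice
    out.resize(e);
    std::copy(in.begin(), in.end(), out.begin() + s);
    s = e;  // next slice starts where this one ended
  }
  return out;
}
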
+ +#include "paddle/fluid/framework/details/gather_op_handle.h" +#include "gtest/gtest.h" + +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { +namespace f = paddle::framework; +namespace p = paddle::platform; + +// test data amount +const f::DDim kDims = {20, 20}; + +struct TestGatherOpHandle { + std::vector> ctxs_; + std::vector local_scopes_; + Scope g_scope_; + std::unique_ptr op_handle_; + std::vector> vars_; + std::vector gpu_list_; + + void WaitAll() { + for (size_t j = 0; j < ctxs_.size(); ++j) { + ctxs_[j]->Wait(); + } + } + + void InitCtxOnGpu(bool use_gpu) { + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + int count = p::GetCUDADeviceCount(); + if (count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " + "device count is " + << count; + exit(0); + } + for (int i = 0; i < count; ++i) { + auto p = p::CUDAPlace(i); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CUDADeviceContext(p)); + } +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { + int count = 8; + for (int i = 0; i < count; ++i) { + auto p = p::CPUPlace(); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CPUDeviceContext(p)); + } + } + } + + void InitGatherOp(size_t input_scope_idx) { + for (size_t j = 0; j < gpu_list_.size(); ++j) { + local_scopes_.push_back(&(g_scope_.NewScope())); + local_scopes_[j]->Var("out"); + } + local_scopes_[input_scope_idx]->Var("input"); + + op_handle_.reset(new GatherOpHandle(local_scopes_, gpu_list_)); + // add input + for (size_t j = 0; j < gpu_list_.size(); ++j) { + op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get(); + vars_.emplace_back(new VarHandle()); + VarHandle* in_var_handle = static_cast(vars_.back().get()); + in_var_handle->place_ = gpu_list_[j]; + in_var_handle->name_ = "input"; + in_var_handle->version_ = 1; + in_var_handle->scope_idx_ = j; + in_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_var_handle); + } + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* in_dummy_var_handle = + static_cast(vars_.back().get()); + in_dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_dummy_var_handle); + + // add output + vars_.emplace_back(new VarHandle()); + VarHandle* out_var_handle = static_cast(vars_.back().get()); + out_var_handle->place_ = gpu_list_[input_scope_idx]; + out_var_handle->name_ = "out"; + out_var_handle->version_ = 2; + out_var_handle->scope_idx_ = input_scope_idx; + op_handle_->AddOutput(out_var_handle); + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* dummy_var_handle = + static_cast(vars_.back().get()); + op_handle_->AddOutput(dummy_var_handle); + } + + void TestGatherSelectedRows(size_t output_scope_idx) { + int height = kDims[0] * 2; + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + std::vector send_vector(f::product(kDims)); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + + for (size_t input_scope_idx = 0; input_scope_idx < gpu_list_.size(); + ++input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_selected_rows = in_var->GetMutable(); + auto value = in_selected_rows->mutable_value(); + value->mutable_data(kDims, gpu_list_[input_scope_idx]); + + in_selected_rows->set_height(height); + in_selected_rows->set_rows(rows); + + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), value); + value->Resize(kDims); + } + + auto 
out_var = local_scopes_[output_scope_idx]->Var("out");
+    auto out_selected_rows = out_var->GetMutable<f::SelectedRows>();
+
+    auto in_var = local_scopes_[output_scope_idx]->Var("input");
+    auto in_selected_rows = in_var->GetMutable<f::SelectedRows>();
+
+    out_selected_rows->mutable_value()->ShareDataWith(
+        in_selected_rows->value());
+
+    op_handle_->Run(false);
+
+    WaitAll();
+
+    p::CPUPlace cpu_place;
+
+    auto& out_select_rows = out_var->Get<f::SelectedRows>();
+    auto rt = out_select_rows.value();
+
+    PADDLE_ENFORCE_EQ(out_select_rows.height(), height, "height is not equal.");
+    for (size_t k = 0; k < out_select_rows.rows().size(); ++k) {
+      PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k % rows.size()]);
+    }
+
+    f::Tensor result_tensor;
+    f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
+    float* ct = result_tensor.data<float>();
+
+    for (int64_t j = 0; j < f::product(kDims); ++j) {
+      ASSERT_NEAR(ct[j], send_vector[j % send_vector.size()], 1e-5);
+    }
+  }
+};
+
+TEST(GatherTester, TestCPUGatherTestSelectedRows) {
+  TestGatherOpHandle test_op;
+  size_t input_scope_idx = 0;
+  test_op.InitCtxOnGpu(false);
+  test_op.InitGatherOp(input_scope_idx);
+  test_op.TestGatherSelectedRows(input_scope_idx);
+}
+
+#ifdef PADDLE_WITH_CUDA
+
+TEST(GatherTester, TestGPUGatherTestSelectedRows) {
+  TestGatherOpHandle test_op;
+  size_t input_scope_idx = 0;
+  // The GPU variant must create its device contexts on GPU.
+  test_op.InitCtxOnGpu(true);
+  test_op.InitGatherOp(input_scope_idx);
+  test_op.TestGatherSelectedRows(input_scope_idx);
+}
+#endif
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5a95cbc53625888bac539f91af391ff0babec17b
--- /dev/null
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -0,0 +1,217 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
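// ---------------------------------------------------------------------------
// A toy model (not part of this diff) of the SelectedRows gather that the
// test above verifies: every scope holds the same row-index list, and
// gathering concatenates the row lists and their dense values while the
// logical height stays fixed -- which is why the test can assert
// out.rows()[k] == rows[k % rows.size()]. ToySelectedRows is illustrative.
#include <cstdint>
#include <vector>

struct ToySelectedRows {
  int64_t height = 0;          // logical row count of the full tensor
  std::vector<int64_t> rows;   // indices of the rows actually stored
  std::vector<float> values;   // dense payload, row-major
};

ToySelectedRows GatherToy(const std::vector<ToySelectedRows> &ins) {
  ToySelectedRows out;
  if (ins.empty()) return out;
  out.height = ins[0].height;  // height is shared across inputs, not summed
  for (const auto &in : ins) {
    out.rows.insert(out.rows.end(), in.rows.begin(), in.rows.end());
    out.values.insert(out.values.end(), in.values.begin(), in.values.end());
  }
  return out;
}
// ---------------------------------------------------------------------------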
+ +#include "paddle/fluid/framework/details/multi_devices_graph_builder.h" +#include "paddle/fluid/framework/details/computation_op_handle.h" +#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h" +#include "paddle/fluid/framework/details/send_op_handle.h" +#include "paddle/fluid/framework/scope.h" + +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" +#endif + +#include +#include + +namespace paddle { +namespace framework { +namespace details { + +#ifdef PADDLE_WITH_CUDA +MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( + const std::vector &places, + const std::string &loss_var_name, + const std::unordered_set ¶ms, + const std::vector &local_scopes, + platform::NCCLContextMap *nccl_ctxs) + : loss_var_name_(loss_var_name), + places_(places), + local_scopes_(local_scopes), + nccl_ctxs_(nccl_ctxs) { +#else +MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( + const std::vector &places, + const std::string &loss_var_name, + const std::unordered_set ¶ms, + const std::vector &local_scopes) + : loss_var_name_(loss_var_name), + places_(places), + local_scopes_(local_scopes) { +#endif + for (auto &p : params) { + grad_names_.insert(GradVarName(p)); + } +} + +void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, + const OpDesc &op, + const platform::Place &p, + const size_t &i) const { + auto *op_handle = result->ops_.back().get(); + op_handle->dev_ctxes_[p] = platform::DeviceContextPool::Instance().Get(p); + + auto var_names = op.InputArgumentNames(); + + for (auto &each_var_name : var_names) { + VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i); + op_handle->AddInput(var); + } + + var_names = op.OutputArgumentNames(); + + for (auto &each_var_name : var_names) { + CreateOpOutput(result, op_handle, each_var_name, p, i); + } +} + +std::unique_ptr MultiDevSSAGraphBuilder::Build( + const ProgramDesc &program) const { + auto graph = new SSAGraph(); + SSAGraph &result = *graph; + std::unordered_set og_has_been_broadcast; + + // We cannot invoke resize. It is a bug of GCC 4.8 + result.vars_ = std::vector< + std::unordered_map>>>( + places_.size()); + + bool is_forwarding = true; + for (auto *op : program.Block(0).AllOps()) { + bool change_forward = false; + if (!is_forwarding) { + // FIXME(yy): Do not hard code like this + if (op->OutputArgumentNames().size() == 1 && + op->OutputArgumentNames()[0] == GradVarName(loss_var_name_)) { + continue; // Drop fill 1. for backward coeff; + } + } + + // append send op if program is distributed trainer main program. + // always use the first device + if (!is_forwarding && op->Type() == "send") { + auto &p = places_[0]; + auto *s = local_scopes_[0]; + // FIXME(wuyi): send op always copy from GPU 0 + result.ops_.emplace_back(new SendOpHandle(*op, s, p)); + // Create inputs for output on original place and no ssa output + // is created for send op. 
+      CreateOpHandleIOs(&result, *op, p, 0);
+      continue;
+    }
+
+    for (size_t i = 0; i < places_.size(); ++i) {
+      auto &p = places_[i];
+      auto *s = local_scopes_[i];
+
+      result.ops_.emplace_back(new ComputationOpHandle(*op, s, p));
+      auto *op_handle = result.ops_.back().get();
+      CreateOpHandleIOs(&result, *op, p, i);
+
+      auto var_names = op->OutputArgumentNames();
+
+      if (is_forwarding) {
+        if (var_names.size() == 1 && var_names[0] == loss_var_name_) {
+// Insert ScaleCost OpHandle
+#ifdef PADDLE_WITH_CUDA
+          auto *communication_dev_ctx = nccl_ctxs_->DevCtx(p);
+#else
+          auto *communication_dev_ctx =
+              platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
+#endif
+
+          op_handle = new ScaleLossGradOpHandle(local_scopes_.size(), s, p,
+                                                communication_dev_ctx);
+          result.ops_.emplace_back(op_handle);
+
+          // FIXME: Currently ScaleLossGradOp only uses device_count as the
+          // scale factor, so it does not depend on any other operators.
+          // VarHandle *loss = GetVarHandle(loss_var_name, place);
+          // loss->pending_ops_.emplace_back(op_handle);
+          // op_handle->inputs_.emplace_back(loss);
+
+          CreateOpOutput(&result, op_handle, GradVarName(loss_var_name_), p, i);
+          change_forward = true;
+        }
+      }
+    }
+
+    if (change_forward) {
+      is_forwarding = false;
+    }
+
+    if (!is_forwarding) {
+      auto var_names = op->OutputArgumentNames();
+      // Currently we assume that once a gradient is generated, it can be
+      // broadcast immediately and is broadcast only once. Cases where the
+      // gradient must be adjusted against the input after it is generated
+      // are not considered at present.
+      for (auto &og : var_names) {
+        if (grad_names_.count(og) != 0 &&
+            og_has_been_broadcast.count(og) == 0) {  // is param grad
+          // Insert NCCL AllReduce Op
+          og_has_been_broadcast.insert(og);
+#ifdef PADDLE_WITH_CUDA
+          result.ops_.emplace_back(
+              new NCCLAllReduceOpHandle(local_scopes_, places_, *nccl_ctxs_));
+          auto *op_handle = result.ops_.back().get();
+
+          for (size_t i = 0; i < places_.size(); ++i) {
+            auto &p = places_[i];
+            auto &vars = result.vars_[i][og];
+
+            if (vars.empty()) {  // This device has no data. continue.
+              continue;
+            }
+            auto &prev_grad = vars[vars.size() - 1];
+            op_handle->AddInput(prev_grad.get());
+
+            vars.emplace_back(new VarHandle);
+            auto &var = vars.back();
+            var->place_ = p;
+            var->name_ = og;
+            var->version_ = vars.size() - 1;
+
+            op_handle->AddOutput(var.get());
+          }
+#else
+          // PADDLE_ENFORCE with a non-empty string argument is always true
+          // and would never fire; PADDLE_THROW reports the missing path.
+          PADDLE_THROW("Not implemented");
+#endif
+        }
+      }
+    }
+  }
+
+  /*
+  Dependency graph has been constructed. However, there are still data
+  hazards that need to be handled.
+  */
+  PolishGraphToSupportDataHazards(&result);
+
+  /*
+   * Only variables should be the leaves of graph.
+   */
+  AddOutputToLeafOps(&result);
+
+  if (VLOG_IS_ON(10)) {
+    std::ostringstream sout;
+    PrintGraphviz(*graph, sout);
+    VLOG(10) << sout.str();
+  }
+
+  return std::unique_ptr<SSAGraph>(graph);
+}
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1518d75b421006db6311c3b0f602e47000ab381
--- /dev/null
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/details/ssa_graph_builder.h" + +namespace paddle { +namespace platform { +class NCCLContextMap; +} + +namespace framework { +class Scope; +namespace details { +class MultiDevSSAGraphBuilder : public SSAGraphBuilder { + public: +#ifdef PADDLE_WITH_CUDA + MultiDevSSAGraphBuilder(const std::vector &places, + const std::string &loss_var_name, + const std::unordered_set ¶ms, + const std::vector &local_scopes, + platform::NCCLContextMap *nccl_ctxs); +#else + MultiDevSSAGraphBuilder(const std::vector &places, + const std::string &loss_var_name, + const std::unordered_set ¶ms, + const std::vector &local_scopes); +#endif + + std::unique_ptr Build(const ProgramDesc &program) const override; + + private: + void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, + const platform::Place &p, const size_t &i) const; + + private: + std::string loss_var_name_; + const std::vector &places_; + const std::vector &local_scopes_; + std::unordered_set grad_names_; + +#ifdef PADDLE_WITH_CUDA + platform::NCCLContextMap *nccl_ctxs_; +#endif +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..1e48f75958a3ada4d1cd5c8d0f920da4fed2157e --- /dev/null +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc @@ -0,0 +1,139 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
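// ---------------------------------------------------------------------------
// A stripped-down sketch (not part of this diff) of the SSA bookkeeping that
// CreateOpHandleIOs relies on above: every write to a variable name appends a
// new version node and readers attach to the latest one, so each version has
// exactly one producer. ToyVarNode/ToySSAVars are illustrative stand-ins for
// VarHandle and SSAGraph::vars_; the real builder keeps one such map per place.
#include <string>
#include <unordered_map>
#include <vector>

struct ToyVarNode {
  std::string name;
  size_t version;
};

struct ToySSAVars {
  std::unordered_map<std::string, std::vector<ToyVarNode>> versions;

  // Analogue of CreateOrGetLatestVarHandle: reads see the newest version.
  ToyVarNode *ReadLatest(const std::string &name) {
    auto &v = versions[name];
    if (v.empty()) v.push_back({name, 0});  // root node, no generating op
    return &v.back();
  }

  // Analogue of CreateOpOutput: each write bumps the version.
  ToyVarNode *Write(const std::string &name) {
    auto &v = versions[name];
    v.push_back({name, v.size()});
    return &v.back();
  }
};
// Usage: two Write("w@GRAD") calls yield versions 0 and 1; a subsequent
// ReadLatest("w@GRAD") returns version 1.
// ---------------------------------------------------------------------------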
+ +#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" + +#include + +namespace paddle { +namespace framework { +namespace details { +NCCLAllReduceOpHandle::NCCLAllReduceOpHandle( + const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap &ctxs) + : local_scopes_(local_scopes), places_(places), nccl_ctxs_(ctxs) { + for (auto &p : places_) { + this->dev_ctxes_[p] = nccl_ctxs_.DevCtx(p); + } +} + +struct ReduceLoDTensor { + const std::vector &src_tensors_; + LoDTensor &dst_tensor_; + + ReduceLoDTensor(const std::vector &src, LoDTensor *dst) + : src_tensors_(src), dst_tensor_(*dst) {} + + template + void operator()() const { + PADDLE_ENFORCE(!src_tensors_.empty()); + auto &t0 = src_tensors_[0]; + PADDLE_ENFORCE_NE(t0.numel(), 0); + dst_tensor_.Resize(t0.dims()); + T *dst = dst_tensor_.mutable_data(platform::CPUPlace()); + std::copy(t0.data(), t0.data() + t0.numel(), dst); + + for (size_t i = 1; i < src_tensors_.size(); ++i) { + auto &t = src_tensors_[i]; + PADDLE_ENFORCE_EQ(t.dims(), t0.dims()); + PADDLE_ENFORCE_EQ(t.type(), t0.type()); + std::transform(t.data(), t.data() + t.numel(), dst, dst, + [](T a, T b) -> T { return a + b; }); + } + } +}; + +void NCCLAllReduceOpHandle::RunImpl() { + if (inputs_.size() == 1) { + return; // No need to all reduce when GPU count = 1; + } else { + // Wait input done + for (auto *in : inputs_) { + auto &p = static_cast(in)->place_; + in->generated_op_->Wait(dev_ctxes_[p]); + } + + auto &var_name = static_cast(this->inputs_[0])->name_; + int dtype = -1; + size_t numel = 0; + + std::vector lod_tensors; + + for (size_t i = 0; i < local_scopes_.size(); ++i) { + auto *s = local_scopes_[i]; + + auto &lod_tensor = s->FindVar(var_name)->Get(); + lod_tensors.emplace_back(lod_tensor); + } + + if (platform::is_gpu_place(lod_tensors[0].place())) { + std::vector> all_reduce_calls; + for (size_t i = 0; i < local_scopes_.size(); ++i) { + auto &p = places_[i]; + auto &lod_tensor = lod_tensors[i]; + void *buffer = const_cast(lod_tensor.data()); + + if (dtype == -1) { + dtype = platform::ToNCCLDataType(lod_tensor.type()); + } + + if (numel == 0) { + numel = static_cast(lod_tensor.numel()); + } + + int dev_id = boost::get(p).device; + auto &nccl_ctx = nccl_ctxs_.at(dev_id); + auto stream = nccl_ctx.stream(); + auto comm = nccl_ctx.comm_; + all_reduce_calls.emplace_back([=] { + PADDLE_ENFORCE(platform::dynload::ncclAllReduce( + buffer, buffer, numel, static_cast(dtype), + ncclSum, comm, stream)); + }); + } + this->RunAndRecordEvent([&] { + platform::NCCLGroupGuard guard; + for (auto &call : all_reduce_calls) { + call(); + } + }); + } else { // Special handle CPU only Operator's gradient. 
Like CRF + auto &trg = + *this->local_scopes_[0]->Var()->GetMutable(); + + // Reduce All Tensor to trg in CPU + ReduceLoDTensor func(lod_tensors, &trg); + VisitDataType(ToDataType(lod_tensors[0].type()), func); + + for (size_t i = 0; i < local_scopes_.size(); ++i) { + auto &scope = local_scopes_[i]; + auto &p = places_[i]; + auto *var = scope->FindVar(var_name); + auto *dev_ctx = dev_ctxes_[p]; + + RunAndRecordEvent(p, [&trg, var, dev_ctx, p] { + auto &tensor_gpu = *var->GetMutable(); + auto &tensor_cpu = trg; + TensorCopy(tensor_cpu, p, *dev_ctx, &tensor_gpu); + }); + } + } + } +} + +std::string NCCLAllReduceOpHandle::Name() const { return "nccl_all_reduce"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.h b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..ad14a3c5cb4625fa121cad2daed389c441e78771 --- /dev/null +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/nccl_helper.h" + +namespace paddle { +namespace framework { +namespace details { + +struct NCCLAllReduceOpHandle : public OpHandleBase { + const std::vector &local_scopes_; + const std::vector &places_; + const platform::NCCLContextMap &nccl_ctxs_; + + NCCLAllReduceOpHandle(const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap &ctxs); + + std::string Name() const override; + + // Delay and buffer nccl_all_reduce together can significantly increase + // performance. Disable this feature by returning false. + bool IsMultiDeviceTransfer() override { return true; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc new file mode 100644 index 0000000000000000000000000000000000000000..534d77860f87be08c8834efd373d90eb199ed6a2 --- /dev/null +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -0,0 +1,128 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/op_handle_base.h" + +namespace paddle { +namespace framework { +namespace details { +std::string OpHandleBase::DebugString() const { + std::stringstream ss; + ss << "("; + for (auto *var : inputs_) { + ss << var->DebugString() << ", "; + } + ss << ") --> ("; + for (auto *var : outputs_) { + ss << var->DebugString() << ", "; + } + ss << ")\n"; + return ss.str(); +} + +OpHandleBase::~OpHandleBase() { +#ifdef PADDLE_WITH_CUDA + for (auto &ev : events_) { + PADDLE_ENFORCE(cudaEventDestroy(ev.second)); + } +#endif +} + +void OpHandleBase::Run(bool use_event) { +#ifdef PADDLE_WITH_CUDA + if (events_.empty() && use_event) { + for (auto &p : dev_ctxes_) { + int dev_id = boost::get(p.first).device; + PADDLE_ENFORCE(cudaSetDevice(dev_id)); + PADDLE_ENFORCE( + cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming)); + } + } +#else + PADDLE_ENFORCE(!use_event); +#endif + + RunImpl(); +} + +void OpHandleBase::Wait(platform::DeviceContext *waited_dev) { +#ifdef PADDLE_WITH_CUDA + if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) { + for (auto &dev_ctx : dev_ctxes_) { + dev_ctx.second->Wait(); + } + } else { + auto stream = + static_cast(waited_dev)->stream(); + for (auto &ev : events_) { + PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0)); + } + } +#else + for (auto &dev_ctx : dev_ctxes_) { + dev_ctx.second->Wait(); + } +#endif +} + +void OpHandleBase::AddInput(VarHandleBase *in) { + this->inputs_.emplace_back(in); + in->pending_ops_.insert(this); +} + +void OpHandleBase::AddOutput(VarHandleBase *out) { + outputs_.emplace_back(out); + out->generated_op_ = this; +} + +void OpHandleBase::RunAndRecordEvent(const std::function &callback) { +#ifdef PADDLE_WITH_CUDA + if (!events_.empty()) { // Use event + std::function method = callback; + + for (auto &p : dev_ctxes_) { + method = [method, p, this]() { + static_cast(p.second)->RecordEvent( + events_.at(boost::get(p.first).device), + method); + }; + } + method(); + } else { +#endif + callback(); +#ifdef PADDLE_WITH_CUDA + } +#endif +} + +void OpHandleBase::RunAndRecordEvent(platform::Place p, + const std::function &callback) { +#ifdef PADDLE_WITH_CUDA + if (platform::is_cpu_place(p) || events_.empty()) { + callback(); + } else { + auto *ctx = dev_ctxes_.at(p); + auto *cuda_ctx = static_cast(ctx); + cuda_ctx->RecordEvent(events_.at(boost::get(p).device), + callback); + } +#else + callback(); +#endif +} + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h new file mode 100644 index 0000000000000000000000000000000000000000..a9a6c8d39cf8741f7d9c91579a650ad742cec381 --- /dev/null +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -0,0 +1,75 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include + +#include "paddle/fluid/framework/details/var_handle.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/macros.h" + +namespace paddle { +namespace framework { +namespace details { + +constexpr char kLocalExecScopeName[] = "@LCOAL_SCOPE@"; + +class OpHandleBase { + private: + DISABLE_COPY_AND_ASSIGN(OpHandleBase); + + public: + std::vector inputs_; + std::vector outputs_; + std::unordered_map + dev_ctxes_; + +#ifdef PADDLE_WITH_CUDA + std::unordered_map events_; +#endif + + OpHandleBase() {} + + std::string DebugString() const; + + virtual std::string Name() const = 0; + + virtual ~OpHandleBase(); + + void Run(bool use_event); + + virtual void Wait(platform::DeviceContext *waited_dev); + + void AddInput(VarHandleBase *in); + + void AddOutput(VarHandleBase *out); + + // If the Op involves data transfer of multiple devices that + // will likely block other computations. + virtual bool IsMultiDeviceTransfer() { return false; } + + protected: + void RunAndRecordEvent(const std::function &callback); + + void RunAndRecordEvent(platform::Place p, + const std::function &callback); + + virtual void RunImpl() = 0; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..7fb9f99a8a1bc044e2f25f373265a5ec9f7d76d5 --- /dev/null +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -0,0 +1,56 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
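// ---------------------------------------------------------------------------
// A self-contained rendering (not part of this diff) of the CPU fallback that
// ReduceLoDTensor implements in nccl_all_reduce_op_handle.cc above: copy the
// first tensor into the destination, then fold each remaining tensor in with
// an element-wise std::transform sum. Plain vectors stand in for LoDTensor.
#include <algorithm>
#include <cassert>
#include <vector>

template <typename T>
std::vector<T> ReduceSum(const std::vector<std::vector<T>> &srcs) {
  assert(!srcs.empty());
  std::vector<T> dst(srcs[0]);             // dst starts as a copy of srcs[0]
  for (size_t i = 1; i < srcs.size(); ++i) {
    assert(srcs[i].size() == dst.size());  // dims and type must match
    std::transform(srcs[i].begin(), srcs[i].end(), dst.begin(), dst.begin(),
                   [](T a, T b) -> T { return a + b; });
  }
  return dst;
}
// Usage: ReduceSum<float>({{1, 1}, {2, 2}, {3, 3}}) yields {6, 6}.
// ---------------------------------------------------------------------------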
+ +#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h" + +#include + +namespace paddle { +namespace framework { +namespace details { +ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope, + platform::Place place, + platform::DeviceContext *dev_ctx) + : coeff_(static_cast(1.0 / num_dev)), scope_(scope), place_(place) { + dev_ctxes_[place_] = dev_ctx; +} + +ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} + +void ScaleLossGradOpHandle::RunImpl() { + std::string var_name = static_cast(this->outputs_[0])->name_; + + float *tmp = + scope_->FindVar(var_name)->GetMutable()->mutable_data( + make_ddim({1}), place_); + + if (platform::is_cpu_place(place_)) { + *tmp = coeff_; + } else { +#ifdef PADDLE_WITH_CUDA + this->RunAndRecordEvent([&] { + auto stream = + static_cast(this->dev_ctxes_[place_]) + ->stream(); + memory::Copy(boost::get(place_), tmp, + platform::CPUPlace(), &coeff_, sizeof(float), stream); + }); +#endif + } +} + +std::string ScaleLossGradOpHandle::Name() const { return "Scale LossGrad"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.h b/paddle/fluid/framework/details/scale_loss_grad_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..ab7353a4fc56bebfe04696efd838dc4559218058 --- /dev/null +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.h @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" + +namespace paddle { +namespace framework { +namespace details { + +struct ScaleLossGradOpHandle : public OpHandleBase { + float coeff_; + Scope *scope_; + platform::Place place_; + + ScaleLossGradOpHandle(size_t num_dev, Scope *scope, platform::Place place, + platform::DeviceContext *context); + + ~ScaleLossGradOpHandle() final; + + std::string Name() const override; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/send_op_handle.cc b/paddle/fluid/framework/details/send_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..549b9d9abbe5bfd17df3509e0442bfa19b7ecd61 --- /dev/null +++ b/paddle/fluid/framework/details/send_op_handle.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/send_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +SendOpHandle::SendOpHandle(const framework::OpDesc &op_desc, + const Scope *local_scope, + const platform::Place &place) + : op_(framework::OpRegistry::CreateOp(op_desc)), + local_scope_(local_scope), + place_(place) {} + +void SendOpHandle::RunImpl() { + // Wait input done + for (auto *in : inputs_) { + auto &p = static_cast(in)->place_; + if (in->DebugString() == "dummy") { // HACK + continue; + } + in->generated_op_->Wait(dev_ctxes_[p]); + } + this->RunAndRecordEvent([&] { op_->Run(*local_scope_, place_); }); +} + +std::string SendOpHandle::Name() const { return "send"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/send_op_handle.h b/paddle/fluid/framework/details/send_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..173f9d726145aeb9e85cc0fb9056eb57bf484098 --- /dev/null +++ b/paddle/fluid/framework/details/send_op_handle.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/scope.h" + +namespace paddle { +namespace framework { +namespace details { + +struct SendOpHandle : public OpHandleBase { + std::unique_ptr op_; + const Scope* local_scope_; + const platform::Place& place_; + + SendOpHandle(const framework::OpDesc& op_desc, const Scope* local_scope, + const platform::Place& place); + + std::string Name() const override; + + // Delay and buffer nccl_all_reduce together can significantly increase + // performance. Disable this feature by returning false. + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph.cc b/paddle/fluid/framework/details/ssa_graph.cc new file mode 100644 index 0000000000000000000000000000000000000000..1b8c889449059c563ea39f86250075ac2537cdbe --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/ssa_graph.h" diff --git a/paddle/fluid/framework/details/ssa_graph.h b/paddle/fluid/framework/details/ssa_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..72684e7f97f1324d0efba960903cf9f2acb618a4 --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph.h @@ -0,0 +1,39 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/details/var_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +struct SSAGraph { + std::vector< + std::unordered_map>>> + vars_; + // aux variables to represent dependency. Useful to resolve data hazard. + std::unordered_set> dep_vars_; + std::vector> ops_; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_builder.cc b/paddle/fluid/framework/details/ssa_graph_builder.cc new file mode 100644 index 0000000000000000000000000000000000000000..be5fb7577581fd99b1b7b80ccdd2acb8d3a91f01 --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph_builder.cc @@ -0,0 +1,154 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
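// ---------------------------------------------------------------------------
// A numeric sketch (not part of this diff) of why ScaleLossGradOpHandle above
// writes coeff_ = 1.0 / num_dev: each device seeds back-propagation on its
// own shard with 1/N, so after the gradients are all-reduced (summed) the
// result equals the gradient of the mean loss over all N shards.
#include <cstdio>

int main() {
  const int num_dev = 4;
  const float coeff = 1.0f / num_dev;  // the value RunImpl stores into tmp
  const float per_device_grad = 8.0f;  // identical toy gradient on every GPU
  float all_reduced = 0.0f;
  for (int i = 0; i < num_dev; ++i) all_reduced += coeff * per_device_grad;
  // Prints 8.0: the scaled sum matches the single-device gradient.
  std::printf("all-reduced grad = %f\n", all_reduced);
  return 0;
}
// ---------------------------------------------------------------------------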
+ +#include "paddle/fluid/framework/details/ssa_graph_builder.h" + +namespace paddle { +namespace framework { +namespace details { +void SSAGraphBuilder::PolishGraphToSupportDataHazards(SSAGraph *graph) { + for (auto &var_map : graph->vars_) { + for (auto &name_pair : var_map) { + if (name_pair.second.size() <= 1) { + continue; + } + auto it_new = name_pair.second.rbegin(); + auto it_old = name_pair.second.rbegin(); + ++it_old; + for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) { + auto *write_op = (*it_new)->generated_op_; + auto &read_ops = (*it_old)->pending_ops_; + + for (auto *read_op : read_ops) { + // Manually add a dependency var from read_op to write_op; + if (read_op == write_op) { + // Read Write is the same op. + continue; + } + + auto *dep_var = new DummyVarHandle(); + read_op->AddOutput(dep_var); + write_op->AddInput(dep_var); + graph->dep_vars_.emplace(dep_var); + } + } + } + } +} + +VarHandle *SSAGraphBuilder::CreateOrGetLatestVarHandle( + SSAGraph *graph, const std::string &each_var_name, + const platform::Place &place, size_t place_offset) { + auto &var_holders = graph->vars_[place_offset]; + auto &var_holder = var_holders[each_var_name]; + VarHandle *var = nullptr; + if (var_holder.empty()) { + var_holder.emplace_back(new VarHandle); + auto &init_var = var_holder[0]; + init_var->place_ = place; + init_var->name_ = each_var_name; + init_var->generated_op_ = nullptr; + init_var->version_ = 0; + var = init_var.get(); + } else { + var = var_holder.rbegin()->get(); + } + return var; +} + +void SSAGraphBuilder::CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, + const std::string &each_var_name, + const platform::Place &place, + size_t place_offset) { + auto &vars = graph->vars_[place_offset][each_var_name]; + size_t version = vars.size(); + vars.emplace_back(new VarHandle()); + auto &var = vars.back(); + var->version_ = version; + var->name_ = each_var_name; + var->place_ = place; + op_handle->AddOutput(var.get()); +} + +template +void IterAllVar(const SSAGraph &graph, Callback callback) { + for (auto &each : graph.vars_) { + for (auto &pair1 : each) { + for (auto &pair2 : pair1.second) { + callback(*pair2); + } + } + } + + for (auto &var : graph.dep_vars_) { + callback(*var); + } +} + +void SSAGraphBuilder::PrintGraphviz(const SSAGraph &graph, std::ostream &sout) { + size_t var_id = 0; + std::unordered_map vars; + + sout << "digraph G {\n"; + + IterAllVar(graph, [&](const VarHandleBase &var) { + auto *var_ptr = &var; + auto *var_handle_ptr = dynamic_cast(var_ptr); + auto *dummy_ptr = dynamic_cast(var_ptr); + + size_t cur_var_id = var_id++; + vars[var_ptr] = cur_var_id; + + if (var_handle_ptr) { + sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name_ + << "\\n" + << var_handle_ptr->place_ << "\\n" + << var_handle_ptr->version_ << "\"]" << std::endl; + } else if (dummy_ptr) { + sout << "var_" << cur_var_id << " [label=\"dummy\"]" << std::endl; + } + }); + + size_t op_id = 0; + for (auto &op : graph.ops_) { + std::string op_name = "op_" + std::to_string(op_id++); + sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]" + << std::endl; + for (auto in : op->inputs_) { + std::string var_name = "var_" + std::to_string(vars[in]); + sout << var_name << " -> " << op_name << std::endl; + } + + for (auto out : op->outputs_) { + std::string var_name = "var_" + std::to_string(vars[out]); + sout << op_name << " -> " << var_name << std::endl; + } + } + + sout << "}\n"; +} + +void SSAGraphBuilder::AddOutputToLeafOps(SSAGraph *graph) { + 
for (auto &op : graph->ops_) { + if (!op->outputs_.empty()) { + continue; + } + auto *dummy_leaf = new DummyVarHandle(); + graph->dep_vars_.emplace(dummy_leaf); + op->AddOutput(dummy_leaf); + } +} +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_builder.h b/paddle/fluid/framework/details/ssa_graph_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..be1f0460e45402806b18835f054a7195df1374cc --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph_builder.h @@ -0,0 +1,61 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/details/ssa_graph.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace framework { +namespace details { + +class SSAGraphBuilder { + public: + SSAGraphBuilder() {} + virtual ~SSAGraphBuilder() {} + virtual std::unique_ptr Build(const ProgramDesc &program) const = 0; + + DISABLE_COPY_AND_ASSIGN(SSAGraphBuilder); + + protected: + /** + * We only handle write after read(WAR), since it should not have a write + * after write in program. If there are write after write operators, we need + * prune them. + * + * https://en.wikipedia.org/wiki/Hazard_(computer_architecture)#Write_after_read_(WAR) + */ + static void PolishGraphToSupportDataHazards(SSAGraph *graph); + + static VarHandle *CreateOrGetLatestVarHandle(SSAGraph *graph, + const std::string &each_var_name, + const platform::Place &place, + size_t place_offset); + + static void CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, + const std::string &each_var_name, + const platform::Place &place, size_t place_offset); + + static void AddOutputToLeafOps(SSAGraph *graph); + + static void PrintGraphviz(const SSAGraph &graph, std::ostream &sout); +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc new file mode 100644 index 0000000000000000000000000000000000000000..8da6ca889b89999e0f6f974503cea476c9de97f3 --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph_executor.cc @@ -0,0 +1,28 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
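// ---------------------------------------------------------------------------
// The write-after-read repair done by PolishGraphToSupportDataHazards above,
// reduced to its core (not part of this diff; the Toy* types are
// illustrative). For two consecutive versions of one variable, every reader
// of the older version gets a dummy output that the writer of the newer
// version consumes, so no scheduler can start the write before the reads end.
#include <memory>
#include <vector>

struct ToyOp;

struct ToyVar {
  ToyOp *generated_op = nullptr;
  std::vector<ToyOp *> pending_ops;
};

struct ToyOp {
  std::vector<ToyVar *> inputs, outputs;
  void AddInput(ToyVar *v) {
    inputs.push_back(v);
    v->pending_ops.push_back(this);
  }
  void AddOutput(ToyVar *v) {
    outputs.push_back(v);
    v->generated_op = this;
  }
};

void AddWarDeps(ToyVar *old_version, ToyVar *new_version,
                std::vector<std::unique_ptr<ToyVar>> *dep_vars) {
  ToyOp *write_op = new_version->generated_op;
  for (ToyOp *read_op : old_version->pending_ops) {
    if (read_op == write_op) continue;     // read and write in the same op
    dep_vars->emplace_back(new ToyVar());  // dummy var: a pure ordering edge
    read_op->AddOutput(dep_vars->back().get());
    write_op->AddInput(dep_vars->back().get());
  }
}
// ---------------------------------------------------------------------------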
+ +#include "paddle/fluid/framework/details/ssa_graph_executor.h" + +namespace paddle { +namespace framework { +namespace details { + +SSAGraphExecutor::SSAGraphExecutor(std::unique_ptr &&graph) + : graph_(std::move(graph)) {} + +SSAGraphExecutor::~SSAGraphExecutor() {} + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..a8833b7388ab907020a260d356f1484ffd227658 --- /dev/null +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/ssa_graph.h" +#include "paddle/fluid/framework/feed_fetch_type.h" + +namespace paddle { +namespace framework { +namespace details { +class SSAGraphExecutor { + DISABLE_COPY_AND_ASSIGN(SSAGraphExecutor); + + public: + // Steal graph inside + explicit SSAGraphExecutor(std::unique_ptr &&graph); + + virtual ~SSAGraphExecutor(); + + virtual FeedFetchList Run(const std::vector &fetch_tensors) = 0; + + protected: + std::unique_ptr graph_; +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc new file mode 100644 index 0000000000000000000000000000000000000000..a371ee10fe03cda86c316f3503f9cadb8c716ae5 --- /dev/null +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -0,0 +1,219 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" + +#include "paddle/fluid/framework/details/fetch_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { +ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( + size_t num_threads, bool use_event, + const std::vector &local_scopes, + const std::vector &places, + std::unique_ptr &&graph, bool allow_op_delay) + : SSAGraphExecutor(std::move(graph)), + pool_(num_threads >= 2 ? 
new ::ThreadPool(num_threads) : nullptr), + local_scopes_(local_scopes), + places_(places), + fetch_ctxs_(places), + use_event_(use_event), + running_ops_(0), + allow_op_delay_(allow_op_delay) {} + +void ThreadedSSAGraphExecutor::RunDelayedOps( + const std::unordered_set &delayed_ops) { + for (auto op : delayed_ops) { + op->Run(use_event_); + } +} + +FeedFetchList ThreadedSSAGraphExecutor::Run( + const std::vector &fetch_tensors) { + std::unordered_map pending_ops; + std::unordered_set pending_vars; + BlockingQueue ready_vars; + std::unordered_set ready_ops; + // For ops (e.g. nccl_all_reduce) that need to coordinate multiple + // streams from multiple GPUs, it's faster to buffer them and schedule + // together since we currently cannot overlap computation and memcpy streams. + // Should revisit it if overlapping is available. + std::unordered_set delayed_ops; + std::unordered_set blocked_by_delayed_ops; + std::unordered_set delayed_vars; + + auto InsertPendingVar = [&pending_vars, &ready_vars](VarHandleBase &var) { + pending_vars.insert(&var); + if (var.generated_op_ == nullptr) { + ready_vars.Push(&var); + } + }; + + auto InsertPendingOp = [&pending_ops](OpHandleBase &op_instance) { + pending_ops.insert({&op_instance, op_instance.inputs_.size()}); + }; + + // Transform SSAGraph to pending_ops & pending_vars + for (auto &var_map : graph_->vars_) { + for (auto &name_pair : var_map) { + for (auto &version_pair : name_pair.second) { + InsertPendingVar(*version_pair); + } + } + } + for (auto &var : graph_->dep_vars_) { + InsertPendingVar(*var); + } + + for (auto &op : graph_->ops_) { + if (op->inputs_.empty()) { // Special case, Op has no input. + ready_ops.insert(op.get()); + } else { + InsertPendingOp(*op); + } + } + + // Step 2. Insert FetchOps + std::vector> fetch_ops; + FeedFetchList fetch_data(fetch_tensors.size()); + + std::unordered_map> fetched_vars; + + for (auto &fetch_var_name : fetch_tensors) { + for (auto &var_map : graph_->vars_) { + auto it = var_map.find(fetch_var_name); + if (it != var_map.end()) { + fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + } + } + } + + std::unordered_set> fetch_dependencies; + for (size_t i = 0; i < fetch_tensors.size(); ++i) { + auto &var_name = fetch_tensors[i]; + auto &vars = fetched_vars.at(var_name); + auto *op = new FetchOpHandle(&fetch_data, i, &local_scopes_); + fetch_ops.emplace_back(op); + + for (auto &p : places_) { + op->dev_ctxes_[p] = fetch_ctxs_.Get(p); + } + + for (auto *var : vars) { + op->AddInput(var); + } + + auto *fetch_dummy = new DummyVarHandle(); + op->AddOutput(fetch_dummy); + fetch_dependencies.emplace(fetch_dummy); + InsertPendingVar(*fetch_dummy); + InsertPendingOp(*op); + } + + auto run_all_ready_ops = [&] { + for (auto *op : ready_ops) { + if (op->IsMultiDeviceTransfer() && allow_op_delay_) { + delayed_ops.insert(op); + delayed_vars.insert(op->outputs_.begin(), op->outputs_.end()); + ready_vars.Extend(op->outputs_); + continue; + } + running_ops_++; + RunOp(&ready_vars, op); + } + ready_ops.clear(); + }; + + // Step 3. Execution + while (!pending_vars.empty() || !ready_ops.empty() || !delayed_ops.empty()) { + // 1. Run All Ready ops + run_all_ready_ops(); + + // 2. Find ready variable + bool timeout; + auto cur_ready_vars = ready_vars.PopAll(1, &timeout); + + if (timeout) { + if (exception_) { + throw * exception_; + } else { + continue; + } + } + // 3. Remove the dependency of ready_var. + // Find the ready_ops after the ready_var. 
+    for (auto ready_var : cur_ready_vars) {
+      pending_vars.erase(ready_var);
+      for (auto *op : ready_var->pending_ops_) {
+        auto &deps = pending_ops[op];
+        --deps;
+        if (deps == 0) {
+          if (delayed_vars.find(ready_var) != delayed_vars.end()) {
+            blocked_by_delayed_ops.insert(op);
+          } else {
+            ready_ops.insert(op);
+          }
+        }
+      }
+    }
+    // When there are no other ops to schedule, schedule buffered delayed
+    // ops and unblock other ops.
+    if (ready_ops.empty() && !delayed_ops.empty() && running_ops_ == 0) {
+      RunDelayedOps(delayed_ops);
+      delayed_ops.clear();
+      for (auto *op : blocked_by_delayed_ops) {
+        ready_ops.insert(op);
+      }
+      blocked_by_delayed_ops.clear();
+    }
+    // Keep looping until all vars are ready.
+  }
+  PADDLE_ENFORCE(ready_ops.empty());
+  PADDLE_ENFORCE(delayed_ops.empty());
+  PADDLE_ENFORCE(blocked_by_delayed_ops.empty());
+
+  // Wait FetchOps.
+  if (!fetch_ops.empty()) {
+    fetch_ops.clear();
+  }
+
+  return fetch_data;
+}
+
+void ThreadedSSAGraphExecutor::RunOp(
+    BlockingQueue<VarHandleBase *> *ready_var_q, details::OpHandleBase *op) {
+  auto op_run = [ready_var_q, op, this] {
+    try {
+      VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
+      op->Run(use_event_);
+      VLOG(10) << op << " " << op->Name() << " Done ";
+      running_ops_--;
+      ready_var_q->Extend(op->outputs_);
+      VLOG(10) << op << " " << op->Name() << " Signal posted";
+    } catch (platform::EnforceNotMet ex) {
+      exception_.reset(new platform::EnforceNotMet(ex));
+    } catch (...) {
+      LOG(FATAL) << "Unknown exception caught";
+    }
+  };
+  if (pool_) {
+    pool_->enqueue(op_run);
+  } else {
+    op_run();
+  }
+}
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb5e837b135c35b5aea403496b45aab1ccc288ff
--- /dev/null
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
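// ---------------------------------------------------------------------------
// The scheduling core of ThreadedSSAGraphExecutor::Run above, as a tiny
// single-threaded sketch (not part of this diff): count unmet inputs per op,
// start with the zero-count ops, and let each finished op's outputs decrement
// the counters of its consumers -- a topological-order execution.
#include <cstdio>
#include <queue>
#include <vector>

void ToposortRun(const std::vector<std::vector<int>> &consumers,  // op -> ops it feeds
                 std::vector<int> pending_inputs) {               // op -> unmet deps
  std::queue<int> ready;
  for (size_t i = 0; i < pending_inputs.size(); ++i)
    if (pending_inputs[i] == 0) ready.push(static_cast<int>(i));
  while (!ready.empty()) {
    int op = ready.front();
    ready.pop();
    std::printf("run op %d\n", op);  // stands in for RunOp(ready_var_q, op)
    for (int next : consumers[op])   // outputs become "ready vars"
      if (--pending_inputs[next] == 0) ready.push(next);
  }
}

int main() {
  // op0 feeds op1 and op2; both feed op3 (a diamond). Runs 0, 1, 2, 3.
  ToposortRun({{1, 2}, {3}, {3}, {}}, {0, 1, 1, 2});
  return 0;
}
// ---------------------------------------------------------------------------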
+ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include "ThreadPool.h" // ThreadPool in thrird party +#include "paddle/fluid/framework/details/ssa_graph_executor.h" + +namespace paddle { +namespace framework { +class Scope; + +namespace details { + +template +class BlockingQueue { + public: + void Push(const T &item) { + { + std::lock_guard g(mutex_); + q_.emplace_back(item); + } + cv_.notify_one(); + } + + template + void Extend(const U &items) { + { + std::lock_guard g(mutex_); + for (auto &item : items) { + q_.emplace_back(item); + } + } + cv_.notify_all(); + } + + std::deque PopAll(size_t ms, bool *timeout) { + auto time = + std::chrono::system_clock::now() + std::chrono::milliseconds(ms); + std::unique_lock lock(mutex_); + *timeout = !cv_.wait_until(lock, time, [this] { return !q_.empty(); }); + std::deque ret; + if (!*timeout) { + std::swap(ret, q_); + } + return ret; + } + + private: + std::mutex mutex_; + std::condition_variable cv_; + std::deque q_; +}; + +class ThreadedSSAGraphExecutor : public SSAGraphExecutor { + public: + ThreadedSSAGraphExecutor(size_t num_threads, bool use_event, + const std::vector &local_scopes, + const std::vector &places, + std::unique_ptr &&graph, + bool allow_op_delay); + + // Run a SSAGraph by a thread pool + // Use topological sort algorithm + FeedFetchList Run(const std::vector &fetch_tensors) override; + + ~ThreadedSSAGraphExecutor() {} + + private: + void RunOp(BlockingQueue *ready_var_q, + details::OpHandleBase *op); + + void RunDelayedOps(const std::unordered_set &delayed_ops); + + private: + std::unique_ptr<::ThreadPool> pool_; + std::vector local_scopes_; + std::vector places_; + platform::DeviceContextPool fetch_ctxs_; + const bool use_event_; + std::unique_ptr exception_; + std::atomic running_ops_; + bool allow_op_delay_; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/var_handle.cc b/paddle/fluid/framework/details/var_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..6f00abd9473a84a77ed1a39015e2ae079e00be79 --- /dev/null +++ b/paddle/fluid/framework/details/var_handle.cc @@ -0,0 +1,32 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
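// ---------------------------------------------------------------------------
// A standalone demonstration (not part of this diff) of the timed wait that
// BlockingQueue::PopAll above is built on: wait_until returns false when the
// deadline passes with the queue still empty, so *timeout lets the executor
// loop wake up, re-check exception_, and try again. The timings here are
// illustrative and the first wait is merely very likely, not guaranteed,
// to time out.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

int main() {
  std::mutex m;
  std::condition_variable cv;
  std::deque<int> q;

  std::thread producer([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
      std::lock_guard<std::mutex> g(m);
      q.push_back(42);
    }
    cv.notify_one();
  });

  std::unique_lock<std::mutex> lock(m);
  bool timeout = !cv.wait_until(
      lock, std::chrono::system_clock::now() + std::chrono::milliseconds(1),
      [&] { return !q.empty(); });            // first wait: deadline too short
  std::printf("first PopAll timed out: %s\n", timeout ? "yes" : "no");

  cv.wait(lock, [&] { return !q.empty(); });  // second wait: the item arrives
  std::printf("popped %d\n", q.front());
  lock.unlock();
  producer.join();
  return 0;
}
// ---------------------------------------------------------------------------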
+ +#include "paddle/fluid/framework/details/var_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +VarHandleBase::~VarHandleBase() {} + +std::string VarHandle::DebugString() const { + std::stringstream ss; + ss << name_ << ":" << place_; + return ss.str(); +} + +std::string DummyVarHandle::DebugString() const { return "dummy"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..871e41343f53b801a22d3a450f0906f37fb372d1 --- /dev/null +++ b/paddle/fluid/framework/details/var_handle.h @@ -0,0 +1,65 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include + +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace framework { +namespace details { +class OpHandleBase; + +// VarHandleBase is the var node in the dependency graph. +// A variable can only be generated by a single operator. i.e. +// This is a single assignment graph. +struct VarHandleBase { + virtual ~VarHandleBase(); + virtual std::string DebugString() const = 0; + + // The operator who generate this variable. nullptr if the variable + // is a root node. + OpHandleBase *generated_op_; + + // Operators which depend on this variable ready. + std::unordered_set pending_ops_; +}; + +// VarHandle is actually a single version of Runtime Variable. +// Variable in Runtime mapped to many VarHandles in Graph. +// Each assignment will generate a new var handle with newer version. +// +// NOTE: runtime variables have place. +struct VarHandle : public VarHandleBase { + std::string DebugString() const override; + + // version field currently is not used, however, just store the version to + // debug easily. + size_t version_; + size_t scope_idx_; + std::string name_; + platform::Place place_; +}; + +// Dummy Variable. 
It is used to represent dependencies between operators +struct DummyVarHandle : public VarHandleBase { + std::string DebugString() const override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 0b171e1dcfa90c3ad8f5a9ace8a9342baaf76e61..513e720fd099bcd898a6c73afd1a3a16f6f53aab 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -46,7 +46,7 @@ ExecutorPrepareContext::~ExecutorPrepareContext() { Executor::Executor(const platform::Place& place) : place_(place) {} -static void CreateTensor(Variable* var, proto::VarType::Type var_type) { +void InitializeVariable(Variable* var, proto::VarType::Type var_type) { if (var_type == proto::VarType::LOD_TENSOR) { var->GetMutable(); } else if (var_type == proto::VarType::SELECTED_ROWS) { @@ -83,8 +83,8 @@ static void CheckTensorNANOrInf(const std::string& name, if (tensor.memory_size() == 0) { return; } - if (tensor.type().hash_code() != typeid(float).hash_code() && - tensor.type().hash_code() != typeid(double).hash_code()) { + if (tensor.type().hash_code() != typeid(float).hash_code() && // NOLINT + tensor.type().hash_code() != typeid(double).hash_code()) { // NOLINT return; } PADDLE_ENFORCE(!framework::TensorContainsInf(tensor), @@ -93,6 +93,43 @@ static void CheckTensorNANOrInf(const std::string& name, "Tensor %s contains NAN", name); } +void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope, + int block_id) { + auto& global_block = pdesc.Block(block_id); + + const Scope* ancestor_scope = scope; + while (ancestor_scope->parent()) { + ancestor_scope = ancestor_scope->parent(); + } + + if (ancestor_scope != scope) { + for (auto& var : global_block.AllVars()) { + if (var->Name() == framework::kEmptyVarName) { + continue; + } + + if (var->Persistable()) { + auto* ptr = const_cast(ancestor_scope)->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " global, which pointer is " << ptr; + } else { + auto* ptr = scope->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " locally, which pointer is " << ptr; + } + } + } else { + for (auto& var : global_block.AllVars()) { + auto* ptr = scope->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create variable " << var->Name() << ", which pointer is " + << ptr; + } + } +} + void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, bool create_local_scope, bool create_vars) { platform::RecordBlock b(block_id); @@ -108,12 +145,13 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, // Return true if the block has feed operators and holder of matching info. static bool has_feed_operators( const BlockDesc& block, - std::map& feed_targets, + const std::map& feed_targets, const std::string& feed_holder_name) { size_t feed_count = 0; for (auto* op : block.AllOps()) { if (op->Type() == kFeedOpType) { feed_count++; + // The input variable's name of feed_op should be feed_holder_name. 
PADDLE_ENFORCE_EQ(op->Input("X")[0], feed_holder_name, "Input to feed op should be '%s'", feed_holder_name); std::string feed_target_name = op->Output("Out")[0]; @@ -129,13 +167,15 @@ static bool has_feed_operators( feed_count, feed_targets.size(), "The number of feed operators should match 'feed_targets'"); - // When feed operator are present, so should be feed_holder - auto var = block.FindVar(feed_holder_name); - PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", - feed_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, - "'%s' variable should be 'FEED_MINIBATCH' type", - feed_holder_name); + if (!feed_holder_name.empty()) { + // When feed operator are present, so should be feed_holder. + auto var = block.FindVar(feed_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + feed_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, + "'%s' variable should be 'FEED_MINIBATCH' type", + feed_holder_name); + } } return feed_count > 0; @@ -148,12 +188,14 @@ static bool has_feed_operators( // and fetch_holder_name. Raise exception when any mismatch is found. // Return true if the block has fetch operators and holder of matching info. static bool has_fetch_operators( - const BlockDesc& block, std::map& fetch_targets, + const BlockDesc& block, + const std::map& fetch_targets, const std::string& fetch_holder_name) { size_t fetch_count = 0; for (auto* op : block.AllOps()) { if (op->Type() == kFetchOpType) { fetch_count++; + // The output variable's name of fetch_op should be fetch_holder_name. PADDLE_ENFORCE_EQ(op->Output("Out")[0], fetch_holder_name, "Output of fetch op should be '%s'", fetch_holder_name); std::string fetch_target_name = op->Input("X")[0]; @@ -169,13 +211,15 @@ static bool has_fetch_operators( fetch_count, fetch_targets.size(), "The number of fetch operators should match 'fetch_targets'"); - // When fetch operator are present, so should be fetch_holder - auto var = block.FindVar(fetch_holder_name); - PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", - fetch_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, - "'%s' variable should be 'FETCH_LIST' type", - fetch_holder_name); + if (!fetch_holder_name.empty()) { + // When fetch operator are present, so should be fetch_holder. 
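The `if (!fetch_holder_name.empty())` guard above (and its feed counterpart) relaxes the old behavior: an empty holder name now skips the holder-variable check instead of failing. A self-contained sketch of that validation rule, with toy types standing in for the block/variable machinery:

```cpp
#include <cassert>
#include <map>
#include <string>

enum class VarType { FEED_MINIBATCH, FETCH_LIST };

// The holder variable is acceptable when no holder name was given
// (nothing to validate), or when the variable exists with the right type.
bool HolderOk(const std::map<std::string, VarType>& block_vars,
              const std::string& holder_name, VarType expected) {
  if (holder_name.empty()) return true;  // validation skipped
  auto it = block_vars.find(holder_name);
  return it != block_vars.end() && it->second == expected;
}

int main() {
  std::map<std::string, VarType> vars = {{"fetch", VarType::FETCH_LIST}};
  assert(HolderOk(vars, "fetch", VarType::FETCH_LIST));
  assert(HolderOk(vars, "", VarType::FETCH_LIST));         // empty name: ok
  assert(!HolderOk(vars, "missing", VarType::FETCH_LIST));
  return 0;
}
```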
+ auto var = block.FindVar(fetch_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + fetch_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, + "'%s' variable should be 'FETCH_LIST' type", + fetch_holder_name); + } } return fetch_count > 0; @@ -184,8 +228,8 @@ static bool has_fetch_operators( void Executor::Run(const ProgramDesc& program, Scope* scope, std::map& feed_targets, std::map& fetch_targets, - const std::string& feed_holder_name, - const std::string& fetch_holder_name, bool create_vars) { + bool create_vars, const std::string& feed_holder_name, + const std::string& fetch_holder_name) { platform::RecordBlock b(kProgramId); bool has_feed_ops = has_feed_operators(program.Block(0), feed_targets, feed_holder_name); @@ -222,16 +266,6 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - // map the data of feed_targets to feed_holder - for (auto* op : global_block->AllOps()) { - if (op->Type() == kFeedOpType) { - std::string feed_target_name = op->Output("Out")[0]; - int idx = boost::get(op->GetAttr("col")); - SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, - idx); - } - } - if (!has_fetch_ops) { // create fetch_holder variable auto* fetch_holder = global_block->Var(fetch_holder_name); @@ -255,17 +289,9 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - Run(*copy_program, scope, 0, create_vars, create_vars); - - // obtain the data of fetch_targets from fetch_holder - for (auto* op : global_block->AllOps()) { - if (op->Type() == kFetchOpType) { - std::string fetch_target_name = op->Input("X")[0]; - int idx = boost::get(op->GetAttr("col")); - *fetch_targets[fetch_target_name] = - GetFetchVariable(*scope, fetch_holder_name, idx); - } - } + auto ctx = Prepare(*copy_program, 0); + RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, create_vars, + feed_holder_name, fetch_holder_name); } std::unique_ptr Executor::Prepare( @@ -279,40 +305,30 @@ std::unique_ptr Executor::Prepare( return std::unique_ptr(ctx); } +std::vector> Executor::Prepare( + const ProgramDesc& program, const std::vector& block_ids) { + std::vector> result; + for (auto& bid : block_ids) { + auto* ctx = new ExecutorPrepareContext(program, bid); + PADDLE_ENFORCE_LT(static_cast(bid), program.Size()); + auto& block = program.Block(bid); + for (auto& op_desc : block.AllOps()) { + ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); + } + result.push_back(std::shared_ptr(ctx)); + } + return result; +} + void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope, bool create_vars) { - auto& block = ctx->prog_.Block(ctx->block_id_); - Scope* local_scope = scope; if (create_vars) { if (create_local_scope) { local_scope = &scope->NewScope(); - for (auto& var : block.AllVars()) { - if (var->Name() == framework::kEmptyVarName) { - continue; - } - - if (var->Persistable()) { - auto* ptr = scope->Var(var->Name()); - CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " global, which pointer is " << ptr; - } else { - auto* ptr = local_scope->Var(var->Name()); - CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " locally, which pointer is " << ptr; - } - } - } else { - for (auto& var : block.AllVars()) { - auto* ptr = local_scope->Var(var->Name()); - CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create variable " << var->Name() << ", which pointer is " - << ptr; - } - } // if 
(create_local_scope) - } // if (create_vars) + } + CreateVariables(ctx->prog_, local_scope, ctx->block_id_); + } for (auto& op : ctx->ops_) { VLOG(3) << place_ << " " << op->DebugStringEx(local_scope); @@ -343,5 +359,42 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } } +void Executor::RunPreparedContext( + ExecutorPrepareContext* ctx, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, bool create_vars, + const std::string& feed_holder_name, const std::string& fetch_holder_name) { + auto& global_block = ctx->prog_.Block(ctx->block_id_); + + PADDLE_ENFORCE( + has_feed_operators(global_block, feed_targets, feed_holder_name), + "Program in ExecutorPrepareContext should has feed_ops."); + PADDLE_ENFORCE( + has_fetch_operators(global_block, fetch_targets, fetch_holder_name), + "Program in the prepared context should has fetch_ops."); + + // map the data of feed_targets to feed_holder + for (auto* op : global_block.AllOps()) { + if (op->Type() == kFeedOpType) { + std::string feed_target_name = op->Output("Out")[0]; + int idx = boost::get(op->GetAttr("col")); + SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, + idx); + } + } + + RunPreparedContext(ctx, scope, create_vars, create_vars); + + // obtain the data of fetch_targets from fetch_holder + for (auto* op : global_block.AllOps()) { + if (op->Type() == kFetchOpType) { + std::string fetch_target_name = op->Input("X")[0]; + int idx = boost::get(op->GetAttr("col")); + *fetch_targets[fetch_target_name] = + GetFetchVariable(*scope, fetch_holder_name, idx); + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index d8dd82469af06a4c5c6a37d2249ee23413884a91..43defdacf2a1c2f59cf3af2461ae6cfc4c61f5be 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -14,6 +14,9 @@ limitations under the License. */ #pragma once +#include +#include +#include #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" @@ -22,6 +25,7 @@ limitations under the License. 
*/ namespace paddle { namespace framework { +extern void InitializeVariable(Variable* var, proto::VarType::Type var_type); struct ExecutorPrepareContext { ExecutorPrepareContext(const framework::ProgramDesc& prog, size_t block_id); @@ -53,17 +57,29 @@ class Executor { void Run(const ProgramDesc& program, Scope* scope, std::map& feed_targets, std::map& fetch_targets, + bool create_vars = true, const std::string& feed_holder_name = "feed", - const std::string& fetch_holder_name = "fetch", - bool create_vars = true); + const std::string& fetch_holder_name = "fetch"); static std::unique_ptr Prepare( const ProgramDesc& program, int block_id); + static std::vector> Prepare( + const ProgramDesc& program, const std::vector& block_ids); + + void CreateVariables(const ProgramDesc& pdesc, Scope* scope, int block_id); + void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope = true, bool create_vars = true); + void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, + bool create_vars = true, + const std::string& feed_holder_name = "feed", + const std::string& fetch_holder_name = "fetch"); + private: const platform::Place place_; }; diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 3c0d93642ac41e8d90f9a248e81cea7a4fe12293..75c557fa4243f4bd984314fac298e9335108e7a9 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -64,7 +64,7 @@ void InitP2P(int count) { #endif } -void InitDevices() { +void InitDevices(bool init_p2p) { /*Init all avaiable devices by default */ std::vector places; @@ -85,7 +85,9 @@ void InitDevices() { for (int i = 0; i < count; ++i) { places.emplace_back(platform::CUDAPlace(i)); } - InitP2P(count); + if (init_p2p) { + InitP2P(count); + } platform::DeviceContextPool::Init(places); } diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index 7d86d1581190780f513776c69b18ad41eb2ce14d..fae98a60b5111465375404609905980177f613b1 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -24,7 +24,7 @@ void InitGflags(std::vector &argv); void InitGLOG(const std::string &prog_name); -void InitDevices(); +void InitDevices(bool init_p2p); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/init_test.cc b/paddle/fluid/framework/init_test.cc index 2a03f0afe657e4b3ac173e8718dd6f6f81ee5e6a..928e2d14abea604cf483f4bc1e1c58fbae04dd21 100644 --- a/paddle/fluid/framework/init_test.cc +++ b/paddle/fluid/framework/init_test.cc @@ -21,7 +21,7 @@ TEST(InitDevices, CPU) { using paddle::platform::DeviceContextPool; #ifndef PADDLE_WITH_CUDA - InitDevices(); + InitDevices(true); DeviceContextPool& pool = DeviceContextPool::Instance(); ASSERT_EQ(pool.size(), 1U); #endif @@ -33,7 +33,7 @@ TEST(InitDevices, CUDA) { #ifdef PADDLE_WITH_CUDA int count = paddle::platform::GetCUDADeviceCount(); - InitDevices(); + InitDevices(true); DeviceContextPool& pool = DeviceContextPool::Instance(); ASSERT_EQ(pool.size(), 1U + static_cast(count)); #endif diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 8155cb55a468a09320b1196b49fc3e34cea261b1..a56674cbe216e312c4394ef537140122352dc785 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -12,9 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/lod_tensor.h" +#include +#include +#include +#include + #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" @@ -22,11 +27,6 @@ limitations under the License. */ #include "paddle/fluid/recordio/scanner.h" #include "paddle/fluid/recordio/writer.h" -#include -#include -#include -#include - namespace paddle { namespace framework { @@ -294,7 +294,7 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor, TensorFromStream(is, static_cast(tensor), dev_ctx); } -void WriteToRecordIO(recordio::Writer &writer, +void WriteToRecordIO(recordio::Writer *writer, const std::vector &tensor, const platform::DeviceContext &dev_ctx) { std::stringstream buffer; @@ -303,18 +303,20 @@ void WriteToRecordIO(recordio::Writer &writer, for (auto &each : tensor) { SerializeToStream(buffer, each, dev_ctx); } - writer.Write(buffer.str()); + writer->Write(buffer.str()); } std::vector ReadFromRecordIO( - recordio::Scanner &scanner, const platform::DeviceContext &dev_ctx) { - std::istringstream sin(scanner.Next()); - uint32_t sz; - sin.read(reinterpret_cast(&sz), sizeof(uint32_t)); + recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) { std::vector result; - result.resize(sz); - for (uint32_t i = 0; i < sz; ++i) { - DeserializeFromStream(sin, &result[i], dev_ctx); + if (scanner->HasNext()) { + std::istringstream sin(scanner->Next()); + uint32_t sz; + sin.read(reinterpret_cast(&sz), sizeof(uint32_t)); + result.resize(sz); + for (uint32_t i = 0; i < sz; ++i) { + DeserializeFromStream(sin, &result[i], dev_ctx); + } } return result; } diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index dee505fee0dccd8d60bb290a8bec4df243e504a2..1159fee39b0737402c60448dcbe69e7535c9d6e1 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -15,6 +15,9 @@ limitations under the License. */ #pragma once #include +#include +#include +#include #ifdef PADDLE_WITH_CUDA #include #include @@ -142,6 +145,7 @@ class LoDTensor : public Tensor { return (lod_)[level].size() - 1; } + // Split LoDTensor and copy to each place specified in places. std::vector SplitLoDTensor( const std::vector places) const; @@ -215,12 +219,12 @@ void SerializeToStream(std::ostream& os, const LoDTensor& tensor, void DeserializeFromStream(std::istream& is, LoDTensor* tensor, const platform::DeviceContext& dev_ctx); -extern void WriteToRecordIO(recordio::Writer& writer, +extern void WriteToRecordIO(recordio::Writer* writer, const std::vector& tensor, const platform::DeviceContext& dev_ctx); extern std::vector ReadFromRecordIO( - recordio::Scanner& scanner, const platform::DeviceContext& dev_ctx); + recordio::Scanner* scanner, const platform::DeviceContext& dev_ctx); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc index e691e29383d4842b80769021e0e494967d38e9bb..97ab98f09b1a902a942d9667bc7716a28b98d54c 100644 --- a/paddle/fluid/framework/lod_tensor_test.cc +++ b/paddle/fluid/framework/lod_tensor_test.cc @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
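With the pointer-based signatures above, `ReadFromRecordIO` now returns an empty vector once the scanner is exhausted, so callers can drain a file without the removed `HasNext` API. A hedged usage sketch (it assumes linking against the fluid framework; the `ReadAll` helper and the file path are illustrative, not part of the library):

```cpp
#include <fstream>
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/recordio/scanner.h"

// Drain every record batch from a RecordIO file; relies on the new
// contract that ReadFromRecordIO returns an empty vector at end-of-stream.
std::vector<paddle::framework::LoDTensor> ReadAll(const std::string& path) {
  namespace fw = paddle::framework;
  auto& ctx = *paddle::platform::DeviceContextPool::Instance().Get(
      paddle::platform::CPUPlace());
  std::unique_ptr<std::istream> file(
      new std::ifstream(path, std::ios::binary));
  paddle::recordio::Scanner scanner(std::move(file));
  std::vector<fw::LoDTensor> all;
  while (true) {
    std::vector<fw::LoDTensor> batch = fw::ReadFromRecordIO(&scanner, ctx);
    if (batch.empty()) break;  // scanner exhausted
    for (auto& t : batch) all.push_back(t);
  }
  return all;
}
```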
-#include "paddle/fluid/framework/lod_tensor.h" - -#include "paddle/fluid/recordio/scanner.h" -#include "paddle/fluid/recordio/writer.h" - #include #include #include #include #include +#include "paddle/fluid/framework/lod_tensor.h" + +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/recordio/writer.h" + namespace paddle { namespace framework { @@ -240,8 +240,8 @@ TEST(LoDTensor, RecordIO) { *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); { recordio::Writer writer(stream, recordio::Compressor::kSnappy); - WriteToRecordIO(writer, {tensor, tensor}, ctx); - WriteToRecordIO(writer, {tensor, tensor}, ctx); + WriteToRecordIO(&writer, {tensor, tensor}, ctx); + WriteToRecordIO(&writer, {tensor, tensor}, ctx); writer.Flush(); } @@ -254,11 +254,11 @@ TEST(LoDTensor, RecordIO) { { std::unique_ptr stream_ptr(stream); recordio::Scanner scanner(std::move(stream_ptr)); - auto tensors = ReadFromRecordIO(scanner, ctx); + auto tensors = ReadFromRecordIO(&scanner, ctx); ASSERT_EQ(tensors.size(), 2); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]); - tensors = ReadFromRecordIO(scanner, ctx); + tensors = ReadFromRecordIO(&scanner, ctx); ASSERT_EQ(tensors.size(), 2); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]); diff --git a/paddle/fluid/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu index be65da5ba230e4bb15b09a07431d3107ffe19522..e3efbe4c464493af87e33510647d8c67d457a76d 100644 --- a/paddle/fluid/framework/lod_tensor_test.cu +++ b/paddle/fluid/framework/lod_tensor_test.cu @@ -30,7 +30,7 @@ __global__ void test(size_t* a, int size) { } TEST(LoD, data) { - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); paddle::framework::LoD lod{{0, 1, 2}}; lod.push_back({0, 2, 4, 5}); @@ -46,7 +46,7 @@ TEST(LoD, data) { } TEST(LoDTensor, LoDInGPU) { - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); paddle::framework::LoDTensor lod_tensor; paddle::platform::CUDAPlace place(0); diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index b39a1164dbd9877d9f45cc6415d74f930921a42f..f97bd0827428feeb590fcad16c48f3461517a646 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -35,7 +35,19 @@ std::vector> kKernelPriority = { std::make_tuple(platform::CPUPlace(), LibraryType::kPlain), }; -static DDim GetDims(const Scope& scope, const std::string& name) { +proto::VarType::Type GetDataTypeOfVar(const Variable* var) { + if (var->IsType()) { + return framework::ToDataType(var->Get().type()); + } else if (var->IsType()) { + return framework::ToDataType( + var->Get().value().type()); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows"); + } +} + +static DDim GetDims(const Scope& scope, const std::string& name, + bool get_actual_dim = false) { Variable* var = scope.FindVar(name); if (var == nullptr) { return DDim({-1}); @@ -44,7 +56,11 @@ static DDim GetDims(const Scope& scope, const std::string& name) { if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { - return var->Get().GetCompleteDims(); + if (get_actual_dim) { + return var->Get().value().dims(); + } else { + return var->Get().GetCompleteDims(); + } } else { return DDim({-1}); } @@ -118,7 +134,7 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < input.second.size(); ++i) { ss << input.second[i]; if (scope) { - ss << "[" << GetDims(*scope, input.second[i]) << "]"; + ss << "[" << GetDims(*scope, 
input.second[i], true) << "]"; ss << "(" << GetLoD(*scope, input.second[i]) << ")"; } if (i != input.second.size() - 1) { @@ -138,7 +154,7 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < output.second.size(); ++i) { ss << output.second[i]; if (scope) { - ss << "[" << GetDims(*scope, output.second[i]) << "]"; + ss << "[" << GetDims(*scope, output.second[i], true) << "]"; ss << "(" << GetLoD(*scope, output.second[i]) << ")"; } if (i != output.second.size() - 1) { @@ -517,6 +533,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, // do data transform Scope& new_scope = scope.NewScope(); + std::vector inplace_vars; for (auto& var_name_item : this->Inputs()) { for (auto& var_name : var_name_item.second) { auto* var = scope.FindVar(var_name); @@ -529,10 +546,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, auto out_var_names = OutputVars(true); if (std::find(out_var_names.begin(), out_var_names.end(), var_name) != out_var_names.end()) { - PADDLE_THROW( - "var %s is both input and output, " - "does not support transform", - var_name); + inplace_vars.push_back(var_name); } VLOG(3) << "Transform Variable " << var_name << " from " << kernel_type_for_var << " to " << expected_kernel_key; @@ -551,6 +565,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope, kernel_iter->second->Compute( ExecutionContext(*this, new_scope, *new_dev_ctx)); + for (auto& var_name : inplace_vars) { + VLOG(3) << "share inplace var " + var_name + " back to it's original scope"; + auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name)); + auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name)); + original_tensor->ShareDataWith(*transformed_tensor); + } + /*For profiling/benchmark only*/ if (FLAGS_benchmark) { new_dev_ctx->Wait(); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 41214b41cb68cbd7049552f39195ae5257e0d06f..b7a7c69b4c8493f945926c75797c49d327a3197e 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -61,6 +61,8 @@ inline std::string GradVarName(const std::string& var_name) { return var_name + kGradVarSuffix; } +proto::VarType::Type GetDataTypeOfVar(const Variable* var); + class OperatorBase; class ExecutionContext; diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 44ca4d7ca564515ae267c5949d29feaf22790251..25f622b725277ac9bcca4622902162f3edf147e8 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -72,7 +72,7 @@ REGISTER_OP_WITHOUT_GRADIENT(test_operator, paddle::framework::OpWithoutKernelCheckerMaker); TEST(OperatorBase, all) { - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); paddle::framework::proto::OpDesc op_desc; op_desc.set_type("test_operator"); BuildVar("input", {"IN1"}, op_desc.add_inputs()); @@ -198,7 +198,7 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel, // test with single input TEST(OpKernel, all) { - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); paddle::framework::proto::OpDesc op_desc; op_desc.set_type("op_with_kernel"); BuildVar("x", {"IN1"}, op_desc.add_inputs()); @@ -228,7 +228,7 @@ REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel, TEST(OpKernel, multi_inputs) { using namespace paddle::framework; - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); proto::OpDesc op_desc; op_desc.set_type("op_multi_inputs_with_kernel"); @@ -269,7 +269,7 @@ class 
OperatorClone : public paddle::framework::OperatorBase { }; TEST(Operator, Clone) { - paddle::framework::InitDevices(); + paddle::framework::InitDevices(true); OperatorClone a("ABC", paddle::framework::VariableNameMap{}, paddle::framework::VariableNameMap{}, paddle::framework::AttributeMap{}); diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc new file mode 100644 index 0000000000000000000000000000000000000000..c1486b527d2e06d2b3f7e0f89458bf9a22564586 --- /dev/null +++ b/paddle/fluid/framework/parallel_executor.cc @@ -0,0 +1,222 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/parallel_executor.h" + +#include +#include +#include + +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/nccl_helper.h" +#endif + +#include "paddle/fluid/framework/details/multi_devices_graph_builder.h" +#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace framework { + +class ParallelExecutorPrivate { + public: + explicit ParallelExecutorPrivate(const std::vector &places) + : places_(places) {} + + std::vector places_; + std::vector local_scopes_; + Scope *global_scope_; + std::unique_ptr executor_; + +#ifdef PADDLE_WITH_CUDA + std::unique_ptr nccl_ctxs_; +#endif + + std::vector> var_types_; +}; + +std::vector &ParallelExecutor::GetLocalScopes() { + return member_->local_scopes_; +} + +ParallelExecutor::ParallelExecutor( + size_t num_threads, bool use_event, + const std::vector &places, + const std::unordered_set ¶ms, + const std::unordered_set &bcast_vars, + const ProgramDesc &main_program, const std::string &loss_var_name, + Scope *scope, const std::vector &local_scopes, bool allow_op_delay) + : member_(new ParallelExecutorPrivate(places)) { + member_->global_scope_ = scope; + + // Step 1. Bcast the params to devs. + // Create local scopes + if (local_scopes.empty()) { + for (size_t i = 0; i < member_->places_.size(); ++i) { + member_->local_scopes_.push_back(&scope->NewScope()); + } + } else { + PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size()); + for (size_t i = 0; i < member_->places_.size(); ++i) { + member_->local_scopes_.push_back(local_scopes[i]); + } + } + +// Bcast Parameters to all GPUs +#ifdef PADDLE_WITH_CUDA + member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_)); +#endif + if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 && + local_scopes.empty()) { // Is CUDA + BCastParamsToGPUs(bcast_vars); + } +// Startup Program has been run. All local scopes has correct parameters. + +// Step 2. Convert main_program to SSA form and dependency graph. 
Also, insert +// ncclOp +#ifdef PADDLE_WITH_CUDA + details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name, + params, member_->local_scopes_, + member_->nccl_ctxs_.get()); +#else + details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name, + params, member_->local_scopes_); +#endif + auto graph = builder.Build(main_program); + + member_->executor_.reset(new details::ThreadedSSAGraphExecutor( + num_threads, use_event, member_->local_scopes_, places, std::move(graph), + allow_op_delay)); + + // Step 3. Create vars in each scope; + for (auto *var : main_program.Block(0).AllVars()) { + member_->var_types_.emplace_back(var->Name(), var->GetType(), + var->Persistable()); + } +} + +void ParallelExecutor::BCastParamsToGPUs( + const std::unordered_set &vars) const { +#ifdef PADDLE_WITH_CUDA + auto *main_scope = member_->local_scopes_[0]; + + for (auto &var : vars) { + auto *main_var = main_scope->FindVar(var); + if (main_var == nullptr || !main_var->IsType()) { + continue; + } + + auto &main_tensor = main_var->Get(); + auto &dims = main_tensor.dims(); + if (paddle::platform::is_gpu_place(main_tensor.place())) { + size_t numel = main_tensor.numel(); + ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type()); + platform::NCCLGroupGuard guard; + for (size_t i = 0; i < member_->places_.size(); ++i) { + auto place = member_->places_[i]; + void *buffer; + if (i == 0) { + buffer = const_cast(main_tensor.data()); + } else { + auto local_scope = member_->local_scopes_[i]; + auto *t = local_scope->Var(var)->GetMutable(); + t->Resize(dims); + buffer = t->mutable_data(place, main_tensor.type()); + } + auto &nccl_ctx = member_->nccl_ctxs_->at(place); + platform::dynload::ncclBcast(buffer, numel, data_type, 0, + nccl_ctx.comm_, nccl_ctx.stream()); + } + } else { + platform::CPUPlace cpu; + for (size_t i = 1; i < member_->places_.size(); ++i) { + auto local_scope = member_->local_scopes_[i]; + auto *t = local_scope->Var(var)->GetMutable(); + t->Resize(dims); + t->mutable_data(cpu, main_tensor.type()); + paddle::framework::TensorCopy(main_tensor, cpu, t); + } + } + member_->nccl_ctxs_->WaitAll(); + } +#else + PADDLE_THROW("Not compiled with CUDA"); +#endif +} + +void ParallelExecutor::Run( + const std::vector &fetch_tensors, + const std::string &fetched_var_name, + const std::unordered_map &feed_tensors) { + platform::RecordBlock b(0); + SplitTensorToPlaces(feed_tensors); + + // Create local scopes. 
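+  // Note: each Run() builds a fresh execution scope per place, publishes
+  // it under details::kLocalExecScopeName, lazily creates any missing
+  // program variables in the per-place scope, and deletes the execution
+  // scope again once the fetch results have been collected.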
+ for (auto &scope : member_->local_scopes_) { + Scope &local_scope = scope->NewScope(); + *scope->Var(details::kLocalExecScopeName)->GetMutable() = + &local_scope; + + for (auto &name_type_pair : member_->var_types_) { + if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) { + continue; + } + + if (std::get<2>(name_type_pair)) { // Persistable + InitializeVariable(scope->Var(std::get<0>(name_type_pair)), + std::get<1>(name_type_pair)); + } else { + InitializeVariable(scope->Var(std::get<0>(name_type_pair)), + std::get<1>(name_type_pair)); + } + } + } + + auto fetch_data = member_->executor_->Run(fetch_tensors); + *member_->global_scope_->Var(fetched_var_name)->GetMutable() = + fetch_data; + + // Wait All computational streams + for (auto p : member_->places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); + } + for (auto &scope : member_->local_scopes_) { + auto &local_scope = + *scope->Var(details::kLocalExecScopeName)->GetMutable(); + scope->DeleteScope(local_scope); + local_scope = nullptr; + } +} + +void ParallelExecutor::SplitTensorToPlaces( + const std::unordered_map &feed_tensors) { + for (auto it : feed_tensors) { + auto lod_tensors = it.second.SplitLoDTensor(member_->places_); + PADDLE_ENFORCE_EQ( + member_->places_.size(), lod_tensors.size(), + "The number of samples of current batch is less than the count of " + "devices, currently, it is not allowed. (%d vs %d)", + member_->places_.size(), lod_tensors.size()); + for (size_t j = 0; j < member_->places_.size(); ++j) { + // TODO(panxy0718): Do I need to delete this var? + auto t = + member_->local_scopes_[j]->Var(it.first)->GetMutable(); + t->ShareDataWith(lod_tensors[j]); + t->set_lod(lod_tensors[j].lod()); + } + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..b4f16dba858fb279ec23a8a04257dda6651148cc --- /dev/null +++ b/paddle/fluid/framework/parallel_executor.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { + +class ParallelExecutorPrivate; + +class ParallelExecutor { + DISABLE_COPY_AND_ASSIGN(ParallelExecutor); + + public: + explicit ParallelExecutor(size_t num_threads, bool use_event, + const std::vector& places, + const std::unordered_set& params, + const std::unordered_set& bcast_vars, + const ProgramDesc& main_program, + const std::string& loss_var_name, Scope* scope, + const std::vector& local_scopes, + bool allow_op_delay); + + std::vector& GetLocalScopes(); + + void Run(const std::vector& fetch_tensors, + const std::string& fetched_var_name, + const std::unordered_map& feed_tensors); + + void BCastParamsToGPUs(const std::unordered_set& vars) const; + + private: + void SplitTensorToPlaces( + const std::unordered_map& feed_tensors); + + ParallelExecutorPrivate* member_; +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc index 049731c7216e542dedcf8754eef79f0a672291d6..77d17fbbccca0292e21acd5e8fa90448527b95c0 100644 --- a/paddle/fluid/framework/program_desc.cc +++ b/paddle/fluid/framework/program_desc.cc @@ -85,9 +85,9 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) { } const std::vector ProgramDesc::GetFeedTargetNames() { - BlockDesc *global_block = blocks_[0].get(); + auto &global_block = Block(0); std::vector feed_target_names; - for (auto *op : global_block->AllOps()) { + for (auto *op : global_block.AllOps()) { if (op->Type() == kFeedOpType) { feed_target_names.insert(feed_target_names.begin(), op->Output("Out")[0]); } @@ -96,9 +96,9 @@ const std::vector ProgramDesc::GetFeedTargetNames() { } const std::vector ProgramDesc::GetFetchTargetNames() { - BlockDesc *global_block = blocks_[0].get(); + auto &global_block = Block(0); std::vector fetch_target_names; - for (auto *op : global_block->AllOps()) { + for (auto *op : global_block.AllOps()) { if (op->Type() == kFetchOpType) { fetch_target_names.push_back(op->Input("X")[0]); } @@ -106,5 +106,43 @@ const std::vector ProgramDesc::GetFetchTargetNames() { return fetch_target_names; } +void ProgramDesc::SetFeedHolderName(const std::string &feed_holder_name) { + auto *global_block = MutableBlock(0); + int index = 0; + for (auto *op : global_block->AllOps()) { + if (op->Type() == kFeedOpType) { + // Unify the input's name of all feed_ops to feed_holder_name + global_block->RemoveVar(op->Input("X")[0]); + op->SetInput("X", {feed_holder_name}); + op->SetAttr("col", {index}); + op->CheckAttrs(); + index++; + } + } + + auto *feed_holder = global_block->Var(feed_holder_name); + feed_holder->SetType(proto::VarType::FEED_MINIBATCH); + feed_holder->SetPersistable(true); +} + +void ProgramDesc::SetFetchHolderName(const std::string &fetch_holder_name) { + auto *global_block = MutableBlock(0); + int index = 0; + for (auto *op : global_block->AllOps()) { + if (op->Type() == kFetchOpType) { + // Unify the output's name of all fetch_ops to fetch_holder_name + global_block->RemoveVar(op->Output("Out")[0]); + op->SetOutput("Out", {fetch_holder_name}); + op->SetAttr("col", {index}); + op->CheckAttrs(); + index++; + } + } + + auto *fetch_holder = global_block->Var(fetch_holder_name); + 
fetch_holder->SetType(proto::VarType::FETCH_LIST); + fetch_holder->SetPersistable(true); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/program_desc.h b/paddle/fluid/framework/program_desc.h index 538a0372116e6f90fd2fae5f00097b8efc5dcb5c..4288081be72c44c0fc3584b50c41a270eac9e204 100644 --- a/paddle/fluid/framework/program_desc.h +++ b/paddle/fluid/framework/program_desc.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/framework.pb.h" @@ -52,9 +53,26 @@ class ProgramDesc { proto::ProgramDesc *Proto(); + // The output variable of feed_op is referenced as feed_target. + // This function is used to collect the output variable's name of all + // feed_ops. const std::vector GetFeedTargetNames(); + + // The input variable of fetch_op is referenced as fetch_target. + // This function is used to collect the input variable's name of all + // fetch_ops. const std::vector GetFetchTargetNames(); + // The input variable of feed_op that holds input Tensor provided by users is + // referenced as feed_holder. + // This function is used to change or unify the feed_holder variables' name. + void SetFeedHolderName(const std::string &feed_holder_name); + + // The output variable of fetch_op that holds output Tensor needed by users is + // referenced as fetch_holder. + // This function is used to change or unify the fetch_holder variables' name. + void SetFetchHolderName(const std::string &fetch_holder_name); + private: proto::ProgramDesc desc_; diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index 66618a291b59996836e822587af618927a4263c7..6c46e9aad5b7fbf67fdcc07a12e7932ac8b6412b 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -66,7 +66,7 @@ TEST(ProgramDesc, copy_ctor) { for (size_t i = 0; i < global_block->OpSize(); ++i) { auto op_origin = global_block->Op(i); - auto op_copy = global_block->Op(i); + auto op_copy = global_block_copy->Op(i); ASSERT_EQ(op_origin->Type(), op_copy->Type()); ASSERT_EQ(op_origin->Inputs(), op_copy->Inputs()); @@ -131,7 +131,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) { for (size_t i = 0; i < global_block->OpSize(); ++i) { auto op_origin = global_block->Op(i); - auto op_restored = global_block->Op(i); + auto op_restored = global_block_restored->Op(i); ASSERT_EQ(op_origin->Type(), op_restored->Type()); ASSERT_EQ(op_origin->Inputs(), op_restored->Inputs()); diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc index 0e44b34383027ef58a033eb082f4bb2118b5d8a3..8af7d2d510d36e4c24ce3ae8dbc13c24ad5d4a0f 100644 --- a/paddle/fluid/framework/prune_test.cc +++ b/paddle/fluid/framework/prune_test.cc @@ -14,18 +14,17 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/prune.h" +#include +#include + #include "paddle/fluid/framework/attribute.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/program_desc.h" -#include - namespace f = paddle::framework; -namespace ops = paddle::operators; void AddOp(const std::string &type, const f::VariableNameMap &inputs, const f::VariableNameMap &outputs, f::AttributeMap attrs, diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index fa00c08e0d5791ee1187aed38b4d140564b7c97d..76126f3dc64d71770d13f9d66bb30f176c112629 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -22,14 +22,16 @@ FileReader::FileReader(const std::vector &dims) : dims_(dims) {} void FileReader::ReadNext(std::vector *out) { ReadNextImpl(out); - PADDLE_ENFORCE_EQ(out->size(), dims_.size()); + if (out->empty()) { + return; + } for (size_t i = 0; i < dims_.size(); ++i) { auto &actual = out->at(i).dims(); auto &expect = dims_[i]; PADDLE_ENFORCE_EQ(actual.size(), expect.size()); for (int j = 0; j < actual.size(); ++j) { - PADDLE_ENFORCE(actual[i] == expect[i] || expect[i] == -1); + // PADDLE_ENFORCE(actual[i] == expect[i] || expect[i] == -1); } } } diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index 3573b99becf6d657c680c5fec0bda4bdde5dd7a2..3a413941df964c8d9454fafc6030c377c10f9fb1 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -14,14 +14,13 @@ #pragma once +#include +#include + #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/platform/place.h" -#include -#include -#include - namespace paddle { namespace framework { @@ -31,8 +30,6 @@ class ReaderBase { virtual void ReInit() = 0; - virtual bool HasNext() const = 0; - virtual ~ReaderBase(); }; @@ -44,8 +41,6 @@ class DecoratedReader : public ReaderBase { void ReInit() override { reader_->ReInit(); } - bool HasNext() const override { return reader_->HasNext(); } - protected: ReaderBase* reader_; }; @@ -80,8 +75,6 @@ class ReaderHolder { reader_->ReInit(); } - bool HasNext() const { return reader_->HasNext(); } - private: std::unique_ptr reader_; }; diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index 17e38b1cf042657834b4d0d1c12cbbb92f19fa45..194df3e4a8b50700e2be01ce5ebca83b92501fb8 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -15,7 +15,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/scope.h" #include // for unique_ptr -#include // for call_once #include #include "glog/logging.h" #include "paddle/fluid/framework/threadpool.h" @@ -39,6 +38,7 @@ Scope::~Scope() { } Scope& Scope::NewScope() const { + std::unique_lock lock(mutex_); kids_.push_back(new Scope(this)); return *kids_.back(); } @@ -92,6 +92,7 @@ std::vector Scope::LocalVarNames() const { } void Scope::DeleteScope(Scope* scope) { + std::unique_lock lock(mutex_); auto it = std::find(this->kids_.begin(), this->kids_.end(), scope); PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope); this->kids_.erase(it); @@ -103,7 +104,7 @@ void Scope::DeleteScope(Scope* scope) { } } -void Scope::EraseVars(std::vector& var_names) { +void Scope::EraseVars(const std::vector& var_names) { std::set var_set(var_names.begin(), var_names.end()); for (auto it = vars_.begin(); it != vars_.end();) { if (var_set.find(it->first) != var_set.end()) { diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h index c1e1f49caaa5a60df0e97289aada465b45213971..c8cb70549f1d131b66fa7c6eeb35f3b7151a9e7f 100644 --- a/paddle/fluid/framework/scope.h +++ b/paddle/fluid/framework/scope.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include // NOLINT #include #include #include @@ -51,13 +52,13 @@ class Scope { /// Create a variable with a scope-unique name. Variable* Var(std::string* name = nullptr); - void EraseVars(std::vector& var_names); + void EraseVars(const std::vector& var_names); /// Find a variable in the scope or any of its ancestors. Returns /// nullptr if cannot find. Variable* FindVar(const std::string& name) const; - const Scope& parent() const { return *parent_; } + const Scope* parent() const { return parent_; } /// Find the scope or an ancestor scope that contains the given variable. const Scope* FindScope(const Variable* var) const; @@ -88,6 +89,9 @@ class Scope { Scope const* parent_{nullptr}; DISABLE_COPY_AND_ASSIGN(Scope); + + private: + mutable std::mutex mutex_; }; } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index 504344e937dfdc362cdc22298a5f963d87011e9d..d9d6b7dd67f1c6e4bbd6a4e1a8f0843d4cb93c05 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -1,8 +1,11 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,6 +16,7 @@ limitations under the License. 
*/ namespace paddle { namespace framework { + void SerializeToStream(std::ostream& os, const SelectedRows& selected_rows, const platform::DeviceContext& dev_ctx) { { // the 1st field, uint32_t version diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h index c9c2c1bb721f2c527fa52f45cc54883f639f4ef8..8e2d9470d3954e0f66c74828a8d8292c2875a8f4 100644 --- a/paddle/fluid/framework/selected_rows.h +++ b/paddle/fluid/framework/selected_rows.h @@ -1,8 +1,11 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -10,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once + +#include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" @@ -44,6 +50,15 @@ class SelectedRows { void set_rows(const Vector& rows) { rows_ = rows; } + /** + * get the index of id in rows + */ + int64_t index(int64_t id) const { + auto it = std::find(rows_.begin(), rows_.end(), id); + PADDLE_ENFORCE(it != rows_.end(), "id should be in rows"); + return static_cast(std::distance(rows_.begin(), it)); + } + DDim GetCompleteDims() const { std::vector dims = vectorize(value_->dims()); dims[0] = height_; @@ -52,7 +67,7 @@ class SelectedRows { private: // Notice: rows can be duplicate. We can have {0, 4, 7, 0, 5, 7, 9} here. - // SelectedRows are simplely concated when adding together. Until a + // SelectedRows are simply concated when adding together. Until a // SelectedRows add a Tensor, will the duplicate rows be handled. Vector rows_; std::unique_ptr value_{nullptr}; diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index f7a6b5ba84ca1762bd903790aa3c0346b22ed035..6f878541e6de1deec1829145b1b325ecd176a034 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -45,11 +45,10 @@ class Tensor { friend struct EigenVector; public: - Tensor() : offset_(0), is_pinned_(false) {} + Tensor() : offset_(0) {} /*! Constructor with place should only be used in pybind. */ - explicit Tensor(const platform::Place& place) - : offset_(0), is_pinned_(false) { + explicit Tensor(const platform::Place& place) : offset_(0) { holder_->set_place(place); } @@ -70,12 +69,11 @@ class Tensor { * @note If not exist, then allocation. */ template - inline T* mutable_data(platform::Place place, bool is_pinned = false); + inline T* mutable_data(platform::Place place); - inline void* mutable_data(platform::Place place, std::type_index type, - bool is_pinned = false); + inline void* mutable_data(platform::Place place, std::type_index type); - inline void* mutable_data(platform::Place place, bool is_pinned = false); + inline void* mutable_data(platform::Place place); /** * @brief Return a pointer to mutable memory block. @@ -86,8 +84,7 @@ class Tensor { * @note If not exist, then allocation. */ template - inline T* mutable_data(DDim dims, platform::Place place, - bool is_pinned = false); + inline T* mutable_data(DDim dims, platform::Place place); /*! 
Return the dimensions of the memory block. */ inline const DDim& dims() const; @@ -95,9 +92,6 @@ class Tensor { /*! Return the numel of the memory block. */ inline int64_t numel() const; - /*! Return the numel of the memory block. */ - inline bool isPinned() const; - /*! Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); @@ -152,14 +146,12 @@ class Tensor { template struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(Place place, size_t size, std::type_index type, - bool is_pinned = false) - : ptr_(static_cast(memory::Alloc(place, size, is_pinned)), - memory::PODDeleter(place, is_pinned)), + PlaceholderImpl(Place place, size_t size, std::type_index type) + : ptr_(static_cast(memory::Alloc(place, size)), + memory::PODDeleter(place)), place_(place), size_(size), - type_(type), - is_pinned_(is_pinned) { + type_(type) { PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", (is_cpu_place(place_) ? "CPU" : "GPU")); } @@ -182,9 +174,6 @@ class Tensor { /* the current type of memory */ std::type_index type_; - - /*! use pinned memory or not. */ - bool is_pinned_; }; /*! holds the memory block if allocated. */ @@ -219,7 +208,6 @@ class Tensor { * PlaceHolder::ptr_ and where the tensor data really begins. */ size_t offset_; - bool is_pinned_; }; inline void Tensor::switch_place(platform::Place new_place) { diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 113814971e115fa88bd0ded34017fa26a9dd5803..f49d1a47a325b2aac6185073203df124be18b54d 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -101,21 +101,19 @@ inline T* Tensor::data() { } template -inline T* Tensor::mutable_data(DDim dims, platform::Place place, - bool is_pinned) { +inline T* Tensor::mutable_data(DDim dims, platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); Resize(dims); - return mutable_data(place, is_pinned); + return mutable_data(place); } template -inline T* Tensor::mutable_data(platform::Place place, bool is_pinned) { +inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - return reinterpret_cast(mutable_data(place, typeid(T), is_pinned)); + return reinterpret_cast(mutable_data(place, typeid(T))); } -inline void* Tensor::mutable_data(platform::Place place, std::type_index type, - bool is_pinned) { +inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { if (holder_ != nullptr) { holder_->set_type(type); } @@ -129,27 +127,33 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type, holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { holder_.reset(new PlaceholderImpl( - boost::get(place), size, type, is_pinned)); - } else if (platform::is_gpu_place(place)) { + boost::get(place), size, type)); + } else if (platform::is_gpu_place(place) || + platform::is_cuda_pinned_place(place)) { #ifndef PADDLE_WITH_CUDA - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW( + "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode."); } #else - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type, is_pinned)); + if (platform::is_gpu_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } else if (platform::is_cuda_pinned_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } } #endif offset_ = 0; - is_pinned_ = is_pinned; } 
return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } -inline void* Tensor::mutable_data(platform::Place place, bool is_pinned) { +inline void* Tensor::mutable_data(platform::Place place) { PADDLE_ENFORCE(this->holder_ != nullptr, - "Cannot invoke mutable data if current hold nothing"); - return mutable_data(place, holder_->type(), is_pinned); + "Cannot invoke mutable data if current hold nothing."); + return mutable_data(place, holder_->type()); } inline Tensor& Tensor::ShareDataWith(const Tensor& src) { @@ -191,8 +195,6 @@ inline const DDim& Tensor::dims() const { return dims_; } inline int64_t Tensor::numel() const { return product(dims_); } -inline bool Tensor::isPinned() const { return is_pinned_; } - inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { Tensor res; res.ShareDataWith(src); diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 8b7533ce712b0a01060842b6f71449ed6bd23e2c..d1b01ae05b808b229309e9689165483a11530c84 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -11,8 +11,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/framework/tensor_util.h" +#include +#include +#include namespace paddle { namespace framework { @@ -65,8 +67,6 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto dst_gpu_place = boost::get(dst_place); auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); - auto ctx_gpu_place = boost::get(ctx_place); - PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); memory::Copy( dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, reinterpret_cast(ctx).stream()); @@ -148,6 +148,11 @@ struct AnyVisitor : public boost::static_visitor { const platform::CPUPlace& cpu) const { return *out.data(); } + + bool GetResult(const framework::Tensor& out, + const platform::CUDAPinnedPlace& cpu) const { + return *out.data(); + } }; template diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc index 9854d618d2b29ed123833f55198179638c95d6db..f26f212d4d5793b88fd1e6d782cdf983bf341879 100644 --- a/paddle/fluid/framework/threadpool.cc +++ b/paddle/fluid/framework/threadpool.cc @@ -14,8 +14,12 @@ #include "paddle/fluid/framework/threadpool.h" +#include "gflags/gflags.h" #include "paddle/fluid/platform/enforce.h" +DEFINE_int32(io_threadpool_size, 100, + "number of threads used for doing IO, default 100"); + namespace paddle { namespace framework { @@ -91,5 +95,20 @@ void ThreadPool::TaskLoop() { } } +std::unique_ptr ThreadPoolIO::io_threadpool_(nullptr); +std::once_flag ThreadPoolIO::io_init_flag_; + +ThreadPool* ThreadPoolIO::GetInstanceIO() { + std::call_once(io_init_flag_, &ThreadPoolIO::InitIO); + return io_threadpool_.get(); +} + +void ThreadPoolIO::InitIO() { + if (io_threadpool_.get() == nullptr) { + // TODO(typhoonzero1986): make this configurable + io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size)); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h index df51fb24a588c84788d7d0b671f932ff4c40f9c2..94111ee335b1a5df327b3e46d62069b4735c54f6 100644 --- a/paddle/fluid/framework/threadpool.h +++ b/paddle/fluid/framework/threadpool.h @@ -14,12 +14,12 @@ limitations under the License. 
*/ #pragma once -#include +#include // NOLINT #include -#include -#include +#include // NOLINT +#include // NOLINT #include -#include +#include // NOLINT #include #include "glog/logging.h" #include "paddle/fluid/platform/enforce.h" @@ -28,10 +28,28 @@ limitations under the License. */ namespace paddle { namespace framework { +struct ExceptionHandler { + mutable std::future> future_; + explicit ExceptionHandler( + std::future>&& f) + : future_(std::move(f)) {} + void operator()() const { + auto ex = this->future_.get(); + if (ex != nullptr) { + LOG(FATAL) << "The exception is thrown inside the thread pool. You " + "should use RunAndGetException to handle the exception.\n" + "The default exception handler is LOG(FATAL)." + << ex->what(); + } + } +}; + // ThreadPool maintains a queue of tasks, and runs them using a fixed // number of threads. class ThreadPool { public: + explicit ThreadPool(int num_threads); + using Task = std::packaged_task()>; // Returns the singleton of ThreadPool. @@ -85,26 +103,8 @@ class ThreadPool { void Wait(); private: - struct ExceptionHandler { - mutable std::future> future_; - explicit ExceptionHandler( - std::future>&& f) - : future_(std::move(f)) {} - void operator()() const { - auto ex = this->future_.get(); - if (ex != nullptr) { - LOG(FATAL) << "The exception is thrown inside the thread pool. You " - "should use RunAndGetException to handle the exception.\n" - "The default exception handler is LOG(FATAL)." - << ex->what(); - } - } - }; - DISABLE_COPY_AND_ASSIGN(ThreadPool); - explicit ThreadPool(int num_threads); - // If the task queue is empty and avaialbe is equal to the number of // threads, means that all tasks are completed. Note: this function // is not thread-safe. Returns true if all tasks are completed. @@ -135,6 +135,17 @@ class ThreadPool { std::condition_variable completed_; }; +class ThreadPoolIO : ThreadPool { + public: + static ThreadPool* GetInstanceIO(); + static void InitIO(); + + private: + // NOTE: threadpool in base will be inhereted here. + static std::unique_ptr io_threadpool_; + static std::once_flag io_init_flag_; +}; + // Run a function asynchronously. // NOTE: The function must return void. If the function need to return a value, // you can use lambda to capture a value pointer. 
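As the note above says, `Async` (and the new `AsyncIO`) only accept `void()` callables, so results travel through a captured pointer and the returned `std::future<void>` is used purely for synchronization. A minimal sketch of that pattern (assumes linking against the fluid framework):

```cpp
#include <future>
#include <iostream>

#include "paddle/fluid/framework/threadpool.h"

int main() {
  int result = 0;
  int* out = &result;  // capture a pointer, since the callable returns void
  std::future<void> done =
      paddle::framework::Async([out] { *out = 6 * 7; });
  done.wait();  // synchronize before reading the captured result
  std::cout << result << std::endl;  // prints 42
  return 0;
}
```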
@@ -143,5 +154,10 @@ std::future Async(Callback callback) { return ThreadPool::GetInstance()->Run(callback); } +template +std::future AsyncIO(Callback callback) { + return ThreadPoolIO::GetInstanceIO()->Run(callback); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tuple.h b/paddle/fluid/framework/tuple.h index 78996908b18a5a0935d8de9920e8ccef9069e74b..f6c6a1fec13d8b12efd1fa71a7a93316e484d045 100644 --- a/paddle/fluid/framework/tuple.h +++ b/paddle/fluid/framework/tuple.h @@ -35,24 +35,25 @@ class Tuple { public: using ElementVars = std::vector; - Tuple(std::vector& var, std::vector& var_desc) + Tuple(const std::vector& var, + const std::vector& var_desc) : var_(var), var_desc_(var_desc) {} - Tuple(std::vector& var) : var_(var) {} + explicit Tuple(std::vector& var) : var_(var) {} - ElementVar get(int idx) const { return var_[idx]; }; + ElementVar get(int idx) const { return var_[idx]; } - ElementVar& get(int idx) { return var_[idx]; }; + ElementVar& get(int idx) { return var_[idx]; } - bool isSameType(Tuple& t) const; + bool isSameType(const Tuple& t) const; - size_t getSize() const { return var_.size(); }; + size_t getSize() const { return var_.size(); } private: ElementVars var_; std::vector var_desc_; }; -bool Tuple::isSameType(Tuple& t) const { +bool Tuple::isSameType(const Tuple& t) const { size_t tuple_size = getSize(); if (tuple_size != t.getSize()) { return false; diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index aff427310f15be72f5c8d0fa1537ffa6bbe2881d..8494edee6c2c714c285c45bbb4fe1d8cb1a524aa 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -1,4 +1,4 @@ -set(FLUID_CORE_MODULES proto_desc paddle_memory lod_tensor executor prune init) +set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor init) cc_library(paddle_fluid_api SRCS io.cc @@ -11,7 +11,7 @@ cc_library(paddle_fluid DEPS ${fluid_modules}) # Create shared library cc_library(paddle_fluid_shared SHARED SRCS io.cc - DEPS ARCHIVE_START ${GLOB_OP_LIB} ${FLUID_CORE_MODULES} ARCHIVE_END) + DEPS ${fluid_modules}) set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) if(NOT APPLE) # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac. @@ -21,4 +21,7 @@ endif() if(WITH_TESTING) add_subdirectory(tests/book) + if (WITH_TENSORRT) + add_subdirectory(tensorrt) + endif() endif() diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 52e9c0baa64508f82d0a86a88c8c5f8d23f9f7f2..3b58019db6e55fa8198d2f77731095c6cf356266 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -17,10 +17,16 @@ limitations under the License. */ #include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/pybind/pybind.h" namespace paddle { namespace inference { +// Temporarily add this function for exposing framework::InitDevices() when +// linking the inference shared library. 
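Combined with the `Load` helper below, a typical single-threaded inference flow under the reordered `Executor::Run` signature might look like this sketch (the `my_model` directory and the use of the first feed/fetch target are illustrative assumptions, not part of the API):

```cpp
#include <map>
#include <memory>
#include <string>

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"

int main() {
  paddle::inference::Init(/*init_p2p=*/false);
  paddle::platform::CPUPlace place;
  paddle::framework::Executor executor(place);
  paddle::framework::Scope scope;

  // "my_model" is an illustrative directory saved by save_inference_model.
  auto program = paddle::inference::Load(executor, scope, "my_model");

  paddle::framework::LoDTensor input, output;
  // ... fill `input` with data matching the model's feed shape ...
  std::map<std::string, const paddle::framework::LoDTensor*> feeds{
      {program->GetFeedTargetNames()[0], &input}};
  std::map<std::string, paddle::framework::LoDTensor*> fetches{
      {program->GetFetchTargetNames()[0], &output}};

  // create_vars now precedes the holder names in the argument list.
  executor.Run(*program, &scope, feeds, fetches, /*create_vars=*/true);
  return 0;
}
```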
+void Init(bool init_p2p) { framework::InitDevices(init_p2p); } + void ReadBinaryFile(const std::string& filename, std::string& contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s", filename); @@ -41,8 +47,7 @@ bool IsPersistable(const framework::VarDesc* var) { return false; } -void LoadPersistables(framework::Executor& executor, - framework::Scope& scope, +void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const framework::ProgramDesc& main_program, const std::string& dirname, const std::string& param_filename) { @@ -108,10 +113,8 @@ std::unique_ptr Load(framework::Executor& executor, } std::unique_ptr Load( - framework::Executor& executor, - framework::Scope& scope, - const std::string& prog_filename, - const std::string& param_filename) { + framework::Executor& executor, framework::Scope& scope, + const std::string& prog_filename, const std::string& param_filename) { std::string model_filename = prog_filename; std::string program_desc_str; ReadBinaryFile(model_filename, program_desc_str); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index 6817a6fca047c9336233697a7bee4e5e16eedd5e..756c936b33ad55e2994542b171b945e248ba2e21 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -18,14 +18,16 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" namespace paddle { namespace inference { -void LoadPersistables(framework::Executor& executor, - framework::Scope& scope, +void Init(bool init_p2p); + +void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const framework::ProgramDesc& main_program, const std::string& dirname, const std::string& param_filename); diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e39c0daac76e0993382868289f66351da3d16f8f --- /dev/null +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -0,0 +1 @@ +nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) diff --git a/paddle/fluid/inference/tensorrt/test_tensorrt.cc b/paddle/fluid/inference/tensorrt/test_tensorrt.cc new file mode 100644 index 0000000000000000000000000000000000000000..a81a708e7a79225fd52c4b8e081afdcd8fe7e9ad --- /dev/null +++ b/paddle/fluid/inference/tensorrt/test_tensorrt.cc @@ -0,0 +1,155 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include +#include +#include "NvInfer.h" +#include "cuda.h" +#include "cuda_runtime_api.h" +#include "paddle/fluid/platform/dynload/tensorrt.h" + +namespace dy = paddle::platform::dynload; + +class Logger : public nvinfer1::ILogger { + public: + void log(nvinfer1::ILogger::Severity severity, const char* msg) override { + switch (severity) { + case Severity::kINFO: + LOG(INFO) << msg; + break; + case Severity::kWARNING: + LOG(WARNING) << msg; + break; + case Severity::kINTERNAL_ERROR: + case Severity::kERROR: + LOG(ERROR) << msg; + break; + default: + break; + } + } +}; + +class ScopedWeights { + public: + ScopedWeights(float value) : value_(value) { + w.type = nvinfer1::DataType::kFLOAT; + w.values = &value_; + w.count = 1; + } + const nvinfer1::Weights& get() { return w; } + + private: + float value_; + nvinfer1::Weights w; +}; + +// The following two API are implemented in TensorRT's header file, cannot load +// from the dynamic library. So create our own implementation and directly +// trigger the method from the dynamic library. +nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} +nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} + +const char* kInputTensor = "input"; +const char* kOutputTensor = "output"; + +// Creates a network to compute y = 2x + 3 +nvinfer1::IHostMemory* CreateNetwork() { + Logger logger; + // Create the engine. + nvinfer1::IBuilder* builder = createInferBuilder(logger); + ScopedWeights weights(2.); + ScopedWeights bias(3.); + + nvinfer1::INetworkDefinition* network = builder->createNetwork(); + // Add the input + auto input = network->addInput(kInputTensor, nvinfer1::DataType::kFLOAT, + nvinfer1::DimsCHW{1, 1, 1}); + EXPECT_NE(input, nullptr); + // Add the hidden layer. + auto layer = network->addFullyConnected(*input, 1, weights.get(), bias.get()); + EXPECT_NE(layer, nullptr); + // Mark the output. + auto output = layer->getOutput(0); + output->setName(kOutputTensor); + network->markOutput(*output); + // Build the engine. + builder->setMaxBatchSize(1); + builder->setMaxWorkspaceSize(1 << 10); + auto engine = builder->buildCudaEngine(*network); + EXPECT_NE(engine, nullptr); + // Serialize the engine to create a model, then close. + nvinfer1::IHostMemory* model = engine->serialize(); + network->destroy(); + engine->destroy(); + builder->destroy(); + return model; +} + +void Execute(nvinfer1::IExecutionContext& context, const float* input, + float* output) { + const nvinfer1::ICudaEngine& engine = context.getEngine(); + // Two binds, input and output + ASSERT_EQ(engine.getNbBindings(), 2); + const int input_index = engine.getBindingIndex(kInputTensor); + const int output_index = engine.getBindingIndex(kOutputTensor); + // Create GPU buffers and a stream + void* buffers[2]; + ASSERT_EQ(0, cudaMalloc(&buffers[input_index], sizeof(float))); + ASSERT_EQ(0, cudaMalloc(&buffers[output_index], sizeof(float))); + cudaStream_t stream; + ASSERT_EQ(0, cudaStreamCreate(&stream)); + // Copy the input to the GPU, execute the network, and copy the output back. 
+ ASSERT_EQ(0, cudaMemcpyAsync(buffers[input_index], input, sizeof(float), + cudaMemcpyHostToDevice, stream)); + context.enqueue(1, buffers, stream, nullptr); + ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float), + cudaMemcpyDeviceToHost, stream)); + cudaStreamSynchronize(stream); + + // Release the stream and the buffers + cudaStreamDestroy(stream); + ASSERT_EQ(0, cudaFree(buffers[input_index])); + ASSERT_EQ(0, cudaFree(buffers[output_index])); +} + +TEST(TensorrtTest, BasicFunction) { + // Create the network serialized model. + nvinfer1::IHostMemory* model = CreateNetwork(); + + // Use the model to create an engine and an execution context. + Logger logger; + nvinfer1::IRuntime* runtime = createInferRuntime(logger); + nvinfer1::ICudaEngine* engine = + runtime->deserializeCudaEngine(model->data(), model->size(), nullptr); + model->destroy(); + nvinfer1::IExecutionContext* context = engine->createExecutionContext(); + + // Execute the network. + float input = 1234; + float output; + Execute(*context, &input, &output); + EXPECT_EQ(output, input * 2 + 3); + + // Destroy the engine. + context->destroy(); + engine->destroy(); + runtime->destroy(); +} diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index e7ffb00ec8d8926193fe510ebdb7185f75c90906..97d9f03f88ad3e851a2dd4256d34e8ca76fdfb01 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -4,7 +4,7 @@ function(inference_test TARGET_NAME) set(multiValueArgs ARGS) cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/fluid/tests) + set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) set(arg_list "") if(inference_test_ARGS) foreach(arg ${inference_test_ARGS}) @@ -17,14 +17,15 @@ function(inference_test TARGET_NAME) string(REGEX REPLACE "^_$" "" arg "${arg}") cc_test(test_inference_${TARGET_NAME}${arg} SRCS test_inference_${TARGET_NAME}.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + DEPS paddle_fluid ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.inference.model) set_tests_properties(test_inference_${TARGET_NAME}${arg} PROPERTIES DEPENDS test_${TARGET_NAME}) endforeach() endfunction(inference_test) -inference_test(fit_a_line) +# This unittest is buggy! +#inference_test(fit_a_line) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) inference_test(recognize_digits ARGS mlp conv) diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc index 9ab808efec3abdb86724fb16725962958c5cf55c..2c5b66a32903f4ffdedb074b31aec53ae6cacaf3 100644 --- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc +++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc @@ -9,9 +9,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" +#include "paddle/fluid/inference/tests/test_multi_thread_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -26,32 +27,63 @@ TEST(inference, fit_a_line) { // 0. 
Call `paddle::framework::InitDevices()` initialize all the devices // In unittests, this is done in paddle/testing/paddle_gtest_main.cc - paddle::framework::LoDTensor input; - // The second dim of the input tensor should be 13 - // The input data should be >= 0 - int64_t batch_size = 10; - SetupTensor( - input, {batch_size, 13}, static_cast(0), static_cast(10)); - std::vector cpu_feeds; - cpu_feeds.push_back(&input); + for (int num_threads : {1, 2}) { + std::vector> cpu_feeds; + cpu_feeds.resize(num_threads); + for (int i = 0; i < num_threads; ++i) { + auto* input = new paddle::framework::LoDTensor(); + // The second dim of the input tensor should be 13 + // The input data should be >= 0 + int64_t batch_size = 10; + SetupTensor(input, {batch_size, 13}, static_cast(0), + static_cast(10)); + cpu_feeds[i].push_back(input); + } - paddle::framework::LoDTensor output1; - std::vector cpu_fetchs1; - cpu_fetchs1.push_back(&output1); + std::vector> cpu_fetchs1; + cpu_fetchs1.resize(num_threads); + for (int i = 0; i < num_threads; ++i) { + auto* output = new paddle::framework::LoDTensor(); + cpu_fetchs1[i].push_back(output); + } - // Run inference on CPU - TestInference(dirname, cpu_feeds, cpu_fetchs1); - LOG(INFO) << output1.dims(); + // Run inference on CPU + LOG(INFO) << "--- CPU Runs (num_threads: " << num_threads << "): ---"; + if (num_threads == 1) { + TestInference(dirname, cpu_feeds[0], + cpu_fetchs1[0]); + } else { + TestMultiThreadInference( + dirname, cpu_feeds, cpu_fetchs1, num_threads); + } #ifdef PADDLE_WITH_CUDA - paddle::framework::LoDTensor output2; - std::vector cpu_fetchs2; - cpu_fetchs2.push_back(&output2); + std::vector> cpu_fetchs2; + cpu_fetchs2.resize(num_threads); + for (int i = 0; i < num_threads; ++i) { + auto* output = new paddle::framework::LoDTensor(); + cpu_fetchs2[i].push_back(output); + } - // Run inference on CUDA GPU - TestInference(dirname, cpu_feeds, cpu_fetchs2); - LOG(INFO) << output2.dims(); + // Run inference on CUDA GPU + LOG(INFO) << "--- GPU Runs (num_threads: " << num_threads << "): ---"; + if (num_threads == 1) { + TestInference(dirname, cpu_feeds[0], + cpu_fetchs2[0]); + } else { + TestMultiThreadInference( + dirname, cpu_feeds, cpu_fetchs2, num_threads); + } - CheckError(output1, output2); + for (int i = 0; i < num_threads; ++i) { + CheckError(*cpu_fetchs1[i][0], *cpu_fetchs2[i][0]); + delete cpu_fetchs2[i][0]; + } #endif + + for (int i = 0; i < num_threads; ++i) { + delete cpu_feeds[i][0]; + delete cpu_fetchs1[i][0]; + } + } // num_threads-loop } diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index e9a27171f1cd68e7b10c860fb4a1417b930ed565..1e6555bb02033a28dedd2a1d1962981dfcc97cc2 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -35,10 +35,8 @@ TEST(inference, image_classification) { paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [0.0, 1.0]. 
- SetupTensor(input, - {FLAGS_batch_size, 3, 32, 32}, - static_cast(0), - static_cast(1)); + SetupTensor(&input, {FLAGS_batch_size, 3, 32, 32}, + static_cast(0), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -48,7 +46,7 @@ TEST(inference, image_classification) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: ---"; - TestInference( + TestInference( dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat); LOG(INFO) << output1.dims(); @@ -59,7 +57,7 @@ TEST(inference, image_classification) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: ---"; - TestInference( + TestInference( dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat); LOG(INFO) << output2.dims(); diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc index 184924016634bba26204d937744ca5fa87cd443c..84bb855fea5fa397ff71e2c922fea3302951b7ca 100644 --- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) { int64_t predicate_dict_len = 3162; int64_t mark_dict_len = 2; - SetupLoDTensor(word, - lod, - static_cast(0), + SetupLoDTensor(&word, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(predicate, - lod, - static_cast(0), + SetupLoDTensor(&predicate, lod, static_cast(0), static_cast(predicate_dict_len - 1)); - SetupLoDTensor(ctx_n2, - lod, - static_cast(0), + SetupLoDTensor(&ctx_n2, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_n1, - lod, - static_cast(0), + SetupLoDTensor(&ctx_n1, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_0, - lod, - static_cast(0), + SetupLoDTensor(&ctx_0, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_p1, - lod, - static_cast(0), + SetupLoDTensor(&ctx_p1, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_p2, - lod, - static_cast(0), + SetupLoDTensor(&ctx_p2, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(mark, - lod, - static_cast(0), + SetupLoDTensor(&mark, lod, static_cast(0), static_cast(mark_dict_len - 1)); std::vector cpu_feeds; diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc index 1fb0f9e77797cf6e61e918700763ee33a495cb96..f12828a2685305c20d26492dbf04fa9ddacf9317 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -35,10 +35,8 @@ TEST(inference, recognize_digits) { paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [-1.0, 1.0]. - SetupTensor(input, - {FLAGS_batch_size, 1, 28, 28}, - static_cast(-1), - static_cast(1)); + SetupTensor(&input, {FLAGS_batch_size, 1, 28, 28}, + static_cast(-1), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -49,8 +47,8 @@ TEST(inference, recognize_digits) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined); + TestInference(dirname, cpu_feeds, cpu_fetchs1, + FLAGS_repeat, is_combined); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -60,8 +58,8 @@ TEST(inference, recognize_digits) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined); + TestInference(dirname, cpu_feeds, cpu_fetchs2, + FLAGS_repeat, is_combined); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc index b42a33c9a90b5feafaed343a197da0e4db11b7ea..70aa6b194d4417fc85384cc3f615089f024f928e 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -36,25 +36,25 @@ TEST(inference, recommender_system) { // Use the first data from paddle.dataset.movielens.test() as input std::vector user_id_data = {1}; - SetupTensor(user_id, {batch_size, 1}, user_id_data); + SetupTensor(&user_id, {batch_size, 1}, user_id_data); std::vector gender_id_data = {1}; - SetupTensor(gender_id, {batch_size, 1}, gender_id_data); + SetupTensor(&gender_id, {batch_size, 1}, gender_id_data); std::vector age_id_data = {0}; - SetupTensor(age_id, {batch_size, 1}, age_id_data); + SetupTensor(&age_id, {batch_size, 1}, age_id_data); std::vector job_id_data = {10}; - SetupTensor(job_id, {batch_size, 1}, job_id_data); + SetupTensor(&job_id, {batch_size, 1}, job_id_data); std::vector movie_id_data = {783}; - SetupTensor(movie_id, {batch_size, 1}, movie_id_data); + SetupTensor(&movie_id, {batch_size, 1}, movie_id_data); std::vector category_id_data = {10, 8, 9}; - SetupLoDTensor(category_id, {3, 1}, {{0, 3}}, category_id_data); + SetupLoDTensor(&category_id, {3, 1}, {{0, 3}}, category_id_data); std::vector movie_title_data = {1069, 4140, 2923, 710, 988}; - SetupLoDTensor(movie_title, {5, 1}, {{0, 5}}, movie_title_data); + SetupLoDTensor(&movie_title, {5, 1}, {{0, 5}}, movie_title_data); std::vector cpu_feeds; cpu_feeds.push_back(&user_id); diff --git a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc index a0523905bd1631cd8768b1601e459cb9d110a84d..e15c3f59acb1eac535120554a3799c37e9d4e951 100644 --- a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc +++ b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) { paddle::framework::LoDTensor word_data, trg_word; paddle::framework::LoD lod{{0, 4, 10}}; - SetupLoDTensor( - word_data, lod, static_cast(0), static_cast(1)); - SetupLoDTensor( - trg_word, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(&word_data, lod, static_cast(0), + static_cast(1)); + SetupLoDTensor(&trg_word, lod, static_cast(0), + static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&word_data); diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc index 824b3274ebc7ba046e61798b3f61ef9924a75679..0dbb6a30405eb64133613052ad57b1f705a9e7b4 100644 --- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc +++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) { paddle::framework::LoD lod{{0, 4, 10}}; int64_t word_dict_len = 5147; - SetupLoDTensor(words, - lod, - static_cast(0), + SetupLoDTensor(&words, lod, static_cast(0), static_cast(word_dict_len - 1)); std::vector cpu_feeds; diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc index 1481760c529c29a7290f476e2a22e1ded5ab7787..c9328eb21b4fdb06c5f65ba0f7337b1e79fa1927 100644 --- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc +++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -33,10 +33,10 @@ TEST(inference, word2vec) { paddle::framework::LoD lod{{0, 1}}; int64_t dict_size = 2073; // The size of dictionary - SetupLoDTensor(first_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(second_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(third_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(fourth_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&first_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&second_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&third_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&fourth_word, lod, static_cast(0), dict_size - 1); std::vector cpu_feeds; cpu_feeds.push_back(&first_word); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index dce541c0971a6ff9a3728e915fe8c3d009c23550..c3a8d0889c6a6dd9591837ccc523da56f8d13661 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -11,59 +11,60 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#pragma once + +#include +#include +#include +#include -#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/io.h" #include "paddle/fluid/platform/profiler.h" template -void SetupTensor(paddle::framework::LoDTensor& input, - paddle::framework::DDim dims, - T lower, - T upper) { - srand(time(0)); - T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); - for (int i = 0; i < input.numel(); ++i) { - input_ptr[i] = - (static_cast(rand()) / static_cast(RAND_MAX)) * (upper - lower) + - lower; +void SetupTensor(paddle::framework::LoDTensor* input, + paddle::framework::DDim dims, T lower, T upper) { + static unsigned int seed = 100; + std::mt19937 rng(seed++); + std::uniform_real_distribution uniform_dist(0, 1); + + T* input_ptr = input->mutable_data(dims, paddle::platform::CPUPlace()); + for (int i = 0; i < input->numel(); ++i) { + input_ptr[i] = static_cast(uniform_dist(rng) * (upper - lower) + lower); } } template -void SetupTensor(paddle::framework::LoDTensor& input, - paddle::framework::DDim dims, - std::vector& data) { +void SetupTensor(paddle::framework::LoDTensor* input, + paddle::framework::DDim dims, const std::vector& data) { CHECK_EQ(paddle::framework::product(dims), static_cast(data.size())); - T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); - memcpy(input_ptr, data.data(), input.numel() * sizeof(T)); + T* input_ptr = input->mutable_data(dims, paddle::platform::CPUPlace()); + memcpy(input_ptr, data.data(), input->numel() * sizeof(T)); } template -void SetupLoDTensor(paddle::framework::LoDTensor& input, - paddle::framework::LoD& lod, - T lower, - T upper) { - input.set_lod(lod); +void SetupLoDTensor(paddle::framework::LoDTensor* input, + const paddle::framework::LoD& lod, T lower, T upper) { + input->set_lod(lod); int dim = lod[0][lod[0].size() - 1]; SetupTensor(input, {dim, 1}, lower, upper); } template -void SetupLoDTensor(paddle::framework::LoDTensor& input, +void SetupLoDTensor(paddle::framework::LoDTensor* input, paddle::framework::DDim dims, - paddle::framework::LoD lod, - std::vector& data) { + const paddle::framework::LoD lod, + const std::vector& data) { const size_t level = lod.size() - 1; CHECK_EQ(dims[0], static_cast((lod[level]).back())); - input.set_lod(lod); + input->set_lod(lod); SetupTensor(input, dims, data); } template -void CheckError(paddle::framework::LoDTensor& output1, - paddle::framework::LoDTensor& output2) { +void CheckError(const paddle::framework::LoDTensor& output1, + const paddle::framework::LoDTensor& output2) { // Check lod information EXPECT_EQ(output1.lod(), output2.lod()); @@ -88,12 +89,11 @@ void CheckError(paddle::framework::LoDTensor& output1, EXPECT_EQ(count, 0U) << "There are " << count << " different elements."; } -template +template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, - std::vector& cpu_fetchs, - const int repeat = 1, - const bool is_combined = false) { + const std::vector& cpu_fetchs, + const int repeat = 1, const bool is_combined = false) { // 1. Define place, executor, scope auto place = Place(); auto executor = paddle::framework::Executor(place); @@ -132,11 +132,9 @@ void TestInference(const std::string& dirname, // `fluid.io.save_inference_model`. 
std::string prog_filename = "__model_combined__"; std::string param_filename = "__params_combined__"; - inference_program = - paddle::inference::Load(executor, - *scope, - dirname + "/" + prog_filename, - dirname + "/" + param_filename); + inference_program = paddle::inference::Load( + executor, *scope, dirname + "/" + prog_filename, + dirname + "/" + param_filename); } else { // Parameters are saved in separate files sited in the specified // `dirname`. @@ -169,8 +167,23 @@ void TestInference(const std::string& dirname, // 6. Run the inference program { + if (!CreateVars) { + // If users don't want to create and destroy variables every time they + // run, they need to set `create_vars` to false and manually call + // `CreateVariables` before running. + executor.CreateVariables(*inference_program, scope, 0); + } + // Ignore the profiling results of the first run - executor.Run(*inference_program, scope, feed_targets, fetch_targets); + std::unique_ptr ctx; + if (PrepareContext) { + ctx = executor.Prepare(*inference_program, 0); + executor.RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, + CreateVars); + } else { + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); + } // Enable the profiler paddle::platform::EnableProfiler(state); @@ -181,7 +194,15 @@ void TestInference(const std::string& dirname, "run_inference", paddle::platform::DeviceContextPool::Instance().Get(place)); - executor.Run(*inference_program, scope, feed_targets, fetch_targets); + if (PrepareContext) { + // Note: if you change the inference_program, you need to call + // executor.Prepare() again to get a new ExecutorPrepareContext. + executor.RunPreparedContext(ctx.get(), scope, feed_targets, + fetch_targets, CreateVars); + } else { + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); + } } // Disable the profiler and print the timing information diff --git a/paddle/fluid/inference/tests/test_multi_thread_helper.h b/paddle/fluid/inference/tests/test_multi_thread_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..56745f115db231d4350da72b7de7967175ac9fe8 --- /dev/null +++ b/paddle/fluid/inference/tests/test_multi_thread_helper.h @@ -0,0 +1,90 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include // NOLINT +#include +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/inference/io.h" + +void ThreadedRunInference( + const std::unique_ptr& inference_program, + paddle::framework::Executor* executor, paddle::framework::Scope* scope, + const int thread_id, + const std::vector& cpu_feeds, + const std::vector& cpu_fetchs) { + auto copy_program = std::unique_ptr( + new paddle::framework::ProgramDesc(*inference_program)); + + std::string feed_holder_name = "feed_" + paddle::string::to_string(thread_id); + std::string fetch_holder_name = + "fetch_" + paddle::string::to_string(thread_id); + copy_program->SetFeedHolderName(feed_holder_name); + copy_program->SetFetchHolderName(fetch_holder_name); + + // 3. Get the feed_target_names and fetch_target_names + const std::vector& feed_target_names = + copy_program->GetFeedTargetNames(); + const std::vector& fetch_target_names = + copy_program->GetFetchTargetNames(); + + // 4. Prepare inputs: set up maps for feed targets + std::map feed_targets; + for (size_t i = 0; i < feed_target_names.size(); ++i) { + // Please make sure that cpu_feeds[i] is right for feed_target_names[i] + feed_targets[feed_target_names[i]] = cpu_feeds[i]; + } + + // 5. Define Tensor to get the outputs: set up maps for fetch targets + std::map fetch_targets; + for (size_t i = 0; i < fetch_target_names.size(); ++i) { + fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; + } + + // 6. Run the inference program + executor->Run(*copy_program, scope, feed_targets, fetch_targets, true, + feed_holder_name, fetch_holder_name); +} + +template +void TestMultiThreadInference( + const std::string& dirname, + const std::vector>& cpu_feeds, + const std::vector>& cpu_fetchs, + const int num_threads) { + // 1. Define place, executor, scope + auto place = Place(); + auto executor = paddle::framework::Executor(place); + auto* scope = new paddle::framework::Scope(); + + // 2. Initialize the inference_program and load parameters + std::unique_ptr inference_program = + paddle::inference::Load(executor, *scope, dirname); + + std::vector threads; + for (int i = 0; i < num_threads; ++i) { + threads.push_back(new std::thread( + ThreadedRunInference, std::ref(inference_program), &executor, scope, i, + std::ref(cpu_feeds[i]), std::ref(cpu_fetchs[i]))); + } + for (int i = 0; i < num_threads; ++i) { + threads[i]->join(); + delete threads[i]; + } + + delete scope; +} diff --git a/paddle/fluid/memory/.clang-format b/paddle/fluid/memory/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... 
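A hedged usage sketch (not part of this patch) of the multi-threaded test helper above, mirroring the reworked fit_a_line test: each thread gets its own feed/fetch tensors, and the helper assigns per-thread feed/fetch holder names ("feed_<i>" / "fetch_<i>") so the threads can share one scope. The feed tensors would still need to be populated (e.g. via SetupTensor) to match the model's inputs; the function name and the explicit CPUPlace template argument are assumptions based on the helper's `auto place = Place();`.

#include <string>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/tests/test_multi_thread_helper.h"

void RunInferenceConcurrently(const std::string& dirname) {
  const int num_threads = 2;
  std::vector<std::vector<paddle::framework::LoDTensor*>> feeds(num_threads);
  std::vector<std::vector<paddle::framework::LoDTensor*>> fetchs(num_threads);
  for (int i = 0; i < num_threads; ++i) {
    feeds[i].push_back(new paddle::framework::LoDTensor());   // fill with SetupTensor
    fetchs[i].push_back(new paddle::framework::LoDTensor());  // written by the executor
  }
  // Each thread runs its own copy of the loaded program against the shared scope.
  TestMultiThreadInference<paddle::platform::CPUPlace>(dirname, feeds, fetchs,
                                                       num_threads);
  for (int i = 0; i < num_threads; ++i) {
    delete feeds[i][0];
    delete fetchs[i][0];
  }
}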
diff --git a/paddle/fluid/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt index 1a61c484823b292234d4758cdc1959d7a21510e6..709fc7e12e1db537ceece30c405c0e8a2582e8ca 100644 --- a/paddle/fluid/memory/CMakeLists.txt +++ b/paddle/fluid/memory/CMakeLists.txt @@ -1,16 +1,15 @@ add_subdirectory(detail) -cc_library(memory SRCS memory.cc DEPS place enforce) +cc_library(malloc SRCS malloc.cc DEPS buddy_allocator place enforce) cc_library(memcpy SRCS memcpy.cc DEPS place) -cc_library(paddle_memory - DEPS - memory - memcpy - meta_data - meta_cache - memory_block - buddy_allocator - system_allocator) +cc_library(memory + DEPS + malloc + memcpy) -cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) +cc_test(malloc_test SRCS malloc_test.cc DEPS malloc) + +#if (WITH_GPU) +# nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place memory) +#endif() diff --git a/paddle/fluid/memory/detail/CMakeLists.txt b/paddle/fluid/memory/detail/CMakeLists.txt index b9c3fc31c1523abf3acbd116745bbf1596454aac..c725dba5e98c200c2542d97cb8f53a938f6b614a 100644 --- a/paddle/fluid/memory/detail/CMakeLists.txt +++ b/paddle/fluid/memory/detail/CMakeLists.txt @@ -1,3 +1,5 @@ +cc_library(memory_block SRCS memory_block.cc memory_block_desc.cc meta_cache.cc) + if(${WITH_GPU}) nv_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info gpu_info) else(${WITH_GPU}) @@ -6,10 +8,4 @@ endif(${WITH_GPU}) cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator) -cc_library(meta_data SRCS meta_data.cc) - -cc_library(meta_cache SRCS meta_cache.cc) - -cc_library(memory_block SRCS memory_block.cc) - -cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS glog) +cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS memory_block system_allocator glog) diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 876837838648d6733b104a5496454f5dc58bbb71..4194ba197948b47003863196efdac1c08a7ae4f6 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -46,7 +46,8 @@ inline size_t align(size_t size, size_t alignment) { void* BuddyAllocator::Alloc(size_t unaligned_size) { // adjust allocation alignment - size_t size = align(unaligned_size + sizeof(Metadata), min_chunk_size_); + size_t size = + align(unaligned_size + sizeof(MemoryBlock::Desc), min_chunk_size_); // acquire the allocator lock std::lock_guard lock(mutex_); @@ -103,7 +104,7 @@ void BuddyAllocator::Free(void* p) { return; } - block->mark_as_free(cache_); + block->mark_as_free(&cache_); total_used_ -= block->total_size(cache_); total_free_ += block->total_size(cache_); @@ -122,7 +123,7 @@ void BuddyAllocator::Free(void* p) { right_buddy)); // merge its right buddy to the block - block->merge(cache_, right_buddy); + block->merge(&cache_, right_buddy); } } @@ -139,7 +140,7 @@ void BuddyAllocator::Free(void* p) { left_buddy->total_size(cache_), left_buddy)); // merge the block to its left buddy - left_buddy->merge(cache_, block); + left_buddy->merge(&cache_, block); block = left_buddy; } } @@ -163,13 +164,13 @@ size_t BuddyAllocator::Used() { return total_used_; } void* BuddyAllocator::SystemAlloc(size_t size) { size_t index = 0; - void* p = system_allocator_->Alloc(index, size); + void* p = system_allocator_->Alloc(&index, size); VLOG(10) << "Allocated " << p << " from system allocator."; if (p == nullptr) return nullptr; - static_cast(p)->init(cache_, MemoryBlock::HUGE_CHUNK, index, + static_cast(p)->init(&cache_, 
MemoryBlock::HUGE_CHUNK, index, size, nullptr, nullptr); return static_cast(p)->data(); @@ -187,14 +188,14 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { // Allocate a new maximum sized block size_t index = 0; - void* p = system_allocator_->Alloc(index, max_chunk_size_); + void* p = system_allocator_->Alloc(&index, max_chunk_size_); if (p == nullptr) return pool_.end(); VLOG(10) << "Creating and inserting new block " << p << " from system allocator"; - static_cast(p)->init(cache_, MemoryBlock::FREE_CHUNK, index, + static_cast(p)->init(&cache_, MemoryBlock::FREE_CHUNK, index, max_chunk_size_, nullptr, nullptr); // gpu fallback allocation @@ -238,11 +239,11 @@ void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it, VLOG(10) << "Split block (" << block << ", " << block->total_size(cache_) << ") into"; - block->split(cache_, size); + block->split(&cache_, size); VLOG(10) << "Left block (" << block << ", " << block->total_size(cache_) << ")"; - block->set_type(cache_, MemoryBlock::ARENA_CHUNK); + block->set_type(&cache_, MemoryBlock::ARENA_CHUNK); // the rest of memory if exist if (block->has_right_buddy(cache_)) { diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index a4ee70c2586f37e3b2328dedfe28135e14d8b18d..2f39d774d6fb6a2bc37877eb2f8b90bebd3cda28 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -14,18 +14,18 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" +#include // NOLINT +#include +#include +#include +#include + +#include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" -#include -#include -#include -#include - namespace paddle { namespace memory { namespace detail { diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc index 07123f2669c3a829ff28e9fab5a404047c5a09c7..f34b922b25a0110690671d487f190e1b977a67bb 100644 --- a/paddle/fluid/memory/detail/memory_block.cc +++ b/paddle/fluid/memory/detail/memory_block.cc @@ -13,143 +13,142 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/platform/assert.h" namespace paddle { namespace memory { namespace detail { -void MemoryBlock::init(MetadataCache& cache, Type t, size_t index, size_t size, +void MemoryBlock::init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy) { - cache.store(this, Metadata(t, index, size - sizeof(Metadata), size, - static_cast(left_buddy), - static_cast(right_buddy))); + cache->save( + this, MemoryBlock::Desc(t, index, size - sizeof(MemoryBlock::Desc), size, + static_cast(left_buddy), + static_cast(right_buddy))); } -MemoryBlock::Type MemoryBlock::type(MetadataCache& cache) const { +MemoryBlock::Type MemoryBlock::type(const MetadataCache& cache) const { return cache.load(this).type; } -size_t MemoryBlock::size(MetadataCache& cache) const { +size_t MemoryBlock::size(const MetadataCache& cache) const { return cache.load(this).size; } -size_t MemoryBlock::total_size(MetadataCache& cache) const { +size_t MemoryBlock::index(const MetadataCache& cache) const { + return cache.load(this).index; +} + +size_t MemoryBlock::total_size(const MetadataCache& cache) const { return cache.load(this).total_size; } -MemoryBlock* MemoryBlock::left_buddy(MetadataCache& cache) const { +bool MemoryBlock::has_left_buddy(const MetadataCache& cache) const { + return left_buddy(cache) != nullptr; +} + +bool MemoryBlock::has_right_buddy(const MetadataCache& cache) const { + return right_buddy(cache) != nullptr; +} + +MemoryBlock* MemoryBlock::left_buddy(const MetadataCache& cache) const { return cache.load(this).left_buddy; } -MemoryBlock* MemoryBlock::right_buddy(MetadataCache& cache) const { +MemoryBlock* MemoryBlock::right_buddy(const MetadataCache& cache) const { return cache.load(this).right_buddy; } -void MemoryBlock::split(MetadataCache& cache, size_t size) { +void MemoryBlock::split(MetadataCache* cache, size_t size) { // make sure the split fits - PADDLE_ASSERT(total_size(cache) >= size); + PADDLE_ASSERT(total_size(*cache) >= size); // bail out if there is no room for another partition - if (total_size(cache) - size <= sizeof(Metadata)) { + if (total_size(*cache) - size <= sizeof(MemoryBlock::Desc)) { return; } // find the position of the split void* right_partition = reinterpret_cast(this) + size; - size_t remaining_size = total_size(cache) - size; + size_t remaining_size = total_size(*cache) - size; // Add the new block as a buddy - auto metadata = cache.load(this); + auto metadata = cache->load(this); // Write the metadata for the new block auto new_block_right_buddy = metadata.right_buddy; - cache.store( - static_cast(right_partition), - Metadata(FREE_CHUNK, index(cache), remaining_size - sizeof(Metadata), - remaining_size, this, new_block_right_buddy)); + cache->save(static_cast(right_partition), + MemoryBlock::Desc(FREE_CHUNK, index(*cache), + remaining_size - sizeof(MemoryBlock::Desc), + remaining_size, this, new_block_right_buddy)); metadata.right_buddy = static_cast(right_partition); - metadata.size = size - sizeof(Metadata); + metadata.size = size - sizeof(MemoryBlock::Desc); metadata.total_size = size; - cache.store(this, metadata); + cache->save(this, metadata); // Write metadata for the new block's right buddy if (new_block_right_buddy != nullptr) { - auto buddy_metadata = cache.load(new_block_right_buddy); + auto buddy_metadata = cache->load(new_block_right_buddy); 
buddy_metadata.left_buddy = static_cast(right_partition); - cache.store(new_block_right_buddy, buddy_metadata); + cache->save(new_block_right_buddy, buddy_metadata); } } -void MemoryBlock::merge(MetadataCache& cache, MemoryBlock* right_buddy) { +void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) { // only free blocks can be merged - PADDLE_ASSERT(type(cache) == FREE_CHUNK); - PADDLE_ASSERT(right_buddy->type(cache) == FREE_CHUNK); + PADDLE_ASSERT(type(*cache) == FREE_CHUNK); + PADDLE_ASSERT(right_buddy->type(*cache) == FREE_CHUNK); - auto metadata = cache.load(this); + auto metadata = cache->load(this); // link this->buddy's buddy - metadata.right_buddy = right_buddy->right_buddy(cache); + metadata.right_buddy = right_buddy->right_buddy(*cache); // link buddy's buddy -> this if (metadata.right_buddy != nullptr) { - auto buddy_metadata = cache.load(metadata.right_buddy); + auto buddy_metadata = cache->load(metadata.right_buddy); buddy_metadata.left_buddy = this; - cache.store(metadata.right_buddy, buddy_metadata); + cache->save(metadata.right_buddy, buddy_metadata); } - metadata.size += right_buddy->total_size(cache); - metadata.total_size += right_buddy->total_size(cache); + metadata.size += right_buddy->total_size(*cache); + metadata.total_size += right_buddy->total_size(*cache); - cache.store(this, metadata); - cache.store(right_buddy, Metadata(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); + cache->save(this, metadata); + cache->save(right_buddy, + MemoryBlock::Desc(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); } -void MemoryBlock::mark_as_free(MetadataCache& cache) { +void MemoryBlock::mark_as_free(MetadataCache* cache) { // check for double free or corruption - PADDLE_ASSERT(type(cache) != FREE_CHUNK); - PADDLE_ASSERT(type(cache) != INVALID_CHUNK); - + PADDLE_ASSERT(type(*cache) != FREE_CHUNK); + PADDLE_ASSERT(type(*cache) != INVALID_CHUNK); set_type(cache, FREE_CHUNK); } -void MemoryBlock::set_type(MetadataCache& cache, Type t) { - auto metadata = cache.load(this); - +void MemoryBlock::set_type(MetadataCache* cache, Type t) { + auto metadata = cache->load(this); metadata.type = t; - - cache.store(this, metadata); -} - -bool MemoryBlock::has_left_buddy(MetadataCache& cache) const { - return left_buddy(cache) != nullptr; -} - -bool MemoryBlock::has_right_buddy(MetadataCache& cache) const { - return right_buddy(cache) != nullptr; -} - -size_t MemoryBlock::index(MetadataCache& cache) const { - return cache.load(this).index; + cache->save(this, metadata); } void* MemoryBlock::data() const { - return const_cast(reinterpret_cast(this)) + 1; + return const_cast( + reinterpret_cast(this)) + + 1; } MemoryBlock* MemoryBlock::metadata() const { return const_cast(reinterpret_cast( - reinterpret_cast(this) - 1)); + reinterpret_cast(this) - 1)); } } // namespace detail diff --git a/paddle/fluid/memory/detail/memory_block.h b/paddle/fluid/memory/detail/memory_block.h index 72b40b73177d086aa912416e7f9cb3cd4ad5b45e..5cceba659beeec1b3c986dc43229f6725e3e11de 100644 --- a/paddle/fluid/memory/detail/memory_block.h +++ b/paddle/fluid/memory/detail/memory_block.h @@ -11,21 +11,21 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once -#include +#include +#include namespace paddle { namespace memory { namespace detail { -// Forward Declarations +// Forward declaration. 
class MetadataCache; -/*! \brief A class used to interpret the contents of a memory block */ -class MemoryBlock { - public: +// MemoryBlock represents each allocated memory block, which contains +// MemoryBlock::Desc and the payload. +struct MemoryBlock { enum Type { FREE_CHUNK, // memory is free and idle ARENA_CHUNK, // memory is being occupied @@ -33,57 +33,96 @@ class MemoryBlock { INVALID_CHUNK // memory is invalid }; - public: - void init(MetadataCache& cache, Type t, size_t index, size_t size, + // init saves the MemoryBlock::Desc of the memory block in a MetadataCache. + // If it is a CPU memory block, the MetadataCache writes the + // MemoryBlock::Desc to the beginning of the block; or, if it is a GPU memory + // block, the MetadataCache writes the Metadata to a std::map in + // the CPU. + void init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy); - public: - /*! \brief The type of the allocation */ - Type type(MetadataCache& cache) const; - - /*! \brief The size of the data region */ - size_t size(MetadataCache& cache) const; + // All these accessors return fields in the MemoryBlock::Desc of the memory + // block. They all need a MetadataCache instance as their first + // parameter because they read the MemoryBlock::Desc from the cache. + Type type(const MetadataCache& cache) const; + size_t size(const MetadataCache& cache) const; + size_t index(const MetadataCache& cache) const; + size_t total_size(const MetadataCache& cache) const; + bool has_left_buddy(const MetadataCache& cache) const; + bool has_right_buddy(const MetadataCache& cache) const; + MemoryBlock* left_buddy(const MetadataCache& cache) const; + MemoryBlock* right_buddy(const MetadataCache& cache) const; - /*! \brief An index to track the allocator */ - size_t index(MetadataCache& cache) const; + // Split the allocation into left/right blocks. + void split(MetadataCache* cache, size_t size); - /*! \brief The total size of the block */ - size_t total_size(MetadataCache& cache) const; + // Merge left and right blocks together. + void merge(MetadataCache* cache, MemoryBlock* right_buddy); - /*! \brief Check the left buddy of the block */ - bool has_left_buddy(MetadataCache& cache) const; + // Mark the allocation as free. + void mark_as_free(MetadataCache* cache); - /*! \brief Check the right buddy of the block */ - bool has_right_buddy(MetadataCache& cache) const; - - /*! \brief Get the left buddy */ - MemoryBlock* left_buddy(MetadataCache& cache) const; - - /*! \brief Get the right buddy */ - MemoryBlock* right_buddy(MetadataCache& cache) const; - - public: - /*! \brief Split the allocation into left/right blocks */ - void split(MetadataCache& cache, size_t size); + // Change the type of the allocation. + void set_type(MetadataCache* cache, Type t); - /*! \brief Merge left and right blocks together */ - void merge(MetadataCache& cache, MemoryBlock* right_buddy); - - /*! \brief Mark the allocation as free */ - void mark_as_free(MetadataCache& cache); - - /*! \brief Change the type of the allocation */ - void set_type(MetadataCache& cache, Type t); - - public: - /*! \brief Get a pointer to the memory block's data */ void* data() const; - - /*! \brief Get a pointer to the memory block's metadata */ MemoryBlock* metadata() const; + // MemoryBlock::Desc describes a MemoryBlock. + struct Desc { + Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, + MemoryBlock* r); + Desc(); + + // Updates guard_begin and guard_end by hashes of the Metadata object.
+ void update_guards(); + + // Checks that guard_begin and guard_end are hashes of the Metadata object. + bool check_guards() const; + + // TODO(gangliao): compress this + size_t guard_begin = 0; + MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; + size_t index = 0; + size_t size = 0; + size_t total_size = 0; + MemoryBlock* left_buddy = nullptr; + MemoryBlock* right_buddy = nullptr; + size_t guard_end = 0; + }; +}; + +// A cache for accessing memory block meta-data that may be expensive +// to access directly. This class exists to unify the +// MemoryBlock::Desc format between GPU and CPU allocations. It should +// be removed when the CPU can access all GPU allocations directly via +// UVM. +class MetadataCache { public: - static size_t overhead(); + explicit MetadataCache(bool uses_gpu); + + // Disable copying and assignment. + MetadataCache(const MetadataCache&) = delete; + MetadataCache& operator=(const MetadataCache&) = delete; + + // Returns the MemoryBlock::Desc for a memory block. When MetadataCache is + // used to manage CPU memory, the MemoryBlock::Desc resides at the beginning + // of the memory block; when used to manage GPU memory, the + // Meatadata resides in CPU memory indexed by cache_. + MemoryBlock::Desc load(const MemoryBlock* memory_block) const; + + // Saves the MemoryBlock::Desc of a memory block into the cache. For CPU + // memory block, writes the MemoryBlock::Desc to the beginning of the memory + // block; whereas for GPU memory, writes it to cache_. + void save(MemoryBlock* memory_block, const MemoryBlock::Desc& meta_data); + + // For GPU memory block, erases its MemoryBlock::Desc from cache_. + void invalidate(MemoryBlock* memory_block); + + private: + typedef std::unordered_map MetadataMap; + MetadataMap cache_; + bool uses_gpu_; }; } // namespace detail diff --git a/paddle/fluid/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/memory_block_desc.cc similarity index 54% rename from paddle/fluid/memory/detail/meta_data.cc rename to paddle/fluid/memory/detail/memory_block_desc.cc index ad862af1705835c495a30232aa2bba2d2a56ad89..393dd9209c0aa443cd17c29b2f9de6eafb48bac9 100644 --- a/paddle/fluid/memory/detail/meta_data.cc +++ b/paddle/fluid/memory/detail/memory_block_desc.cc @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/memory/detail/meta_data.h" - #include +#include "paddle/fluid/memory/detail/memory_block.h" + namespace paddle { namespace memory { namespace detail { -Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, - MemoryBlock* l, MemoryBlock* r) +MemoryBlock::Desc::Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts, + MemoryBlock* l, MemoryBlock* r) : type(t), index(i), size(s), @@ -29,7 +29,7 @@ Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, left_buddy(l), right_buddy(r) {} -Metadata::Metadata() +MemoryBlock::Desc::Desc() : type(MemoryBlock::INVALID_CHUNK), index(0), size(0), @@ -37,32 +37,36 @@ Metadata::Metadata() left_buddy(nullptr), right_buddy(nullptr) {} +namespace { + template -inline void hash_combine(std::size_t& seed, const T& v) { +inline void hash_combine(std::size_t* seed, const T& v) { std::hash hasher; - seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + (*seed) ^= hasher(v) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2); } -inline size_t hash(const Metadata* metadata, size_t initial_seed) { +inline size_t hash(const MemoryBlock::Desc& metadata, size_t initial_seed) { size_t seed = initial_seed; - hash_combine(seed, (size_t)metadata->type); - hash_combine(seed, metadata->index); - hash_combine(seed, metadata->size); - hash_combine(seed, metadata->total_size); - hash_combine(seed, metadata->left_buddy); - hash_combine(seed, metadata->right_buddy); + hash_combine(&seed, static_cast(metadata.type)); + hash_combine(&seed, metadata.index); + hash_combine(&seed, metadata.size); + hash_combine(&seed, metadata.total_size); + hash_combine(&seed, metadata.left_buddy); + hash_combine(&seed, metadata.right_buddy); return seed; } -void Metadata::update_guards() { - guard_begin = hash(this, 1); - guard_end = hash(this, 2); +} // namespace + +void MemoryBlock::Desc::update_guards() { + guard_begin = hash(*this, 1); + guard_end = hash(*this, 2); } -bool Metadata::check_guards() const { - return guard_begin == hash(this, 1) && guard_end == hash(this, 2); +bool MemoryBlock::Desc::check_guards() const { + return guard_begin == hash(*this, 1) && guard_end == hash(*this, 2); } } // namespace detail diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index 43249e842ad4d2419fed041e6c9056021e9663cd..b86e4f38c42a26e155f276f9b73cbed1d0d83f7d 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/memory/detail/meta_cache.h" #include "glog/logging.h" #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/platform/assert.h" @@ -23,29 +22,28 @@ namespace detail { MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {} -Metadata MetadataCache::load(const MemoryBlock* block) { +MemoryBlock::Desc MetadataCache::load(const MemoryBlock* block) const { if (uses_gpu_) { - auto existing_metadata = cache_.find(block); - PADDLE_ASSERT(existing_metadata->second.check_guards()); - return existing_metadata->second; + auto existing_desc = cache_.find(block); + PADDLE_ASSERT(existing_desc->second.check_guards()); + return existing_desc->second; } else { - auto* meta = reinterpret_cast(block); - VLOG(10) << "Load MetaData type=" << meta->type; - PADDLE_ASSERT(meta->check_guards()); - return *reinterpret_cast(block); + auto* desc = reinterpret_cast(block); + VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type; + PADDLE_ASSERT(desc->check_guards()); + return *reinterpret_cast(block); } } -void MetadataCache::store(MemoryBlock* block, - const Metadata& original_metadata) { - auto metadata = original_metadata; - - metadata.update_guards(); +void MetadataCache::save(MemoryBlock* block, + const MemoryBlock::Desc& original_desc) { + auto desc = original_desc; + desc.update_guards(); if (uses_gpu_) { - cache_[block] = metadata; + cache_[block] = desc; } else { - *reinterpret_cast(block) = metadata; + *reinterpret_cast(block) = desc; } } diff --git a/paddle/fluid/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h deleted file mode 100644 index 3283d756a6e7f7f1750442039797846bdad51125..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/detail/meta_cache.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -/** - * \brief A cache for accessing memory block meta-data that may be expensive - * to access directly. - * - * \note This class exists to unify the metadata format between GPU and CPU - * allocations. It should be removed when the CPU can access all GPU - * allocations directly via UVM. - */ -class MetadataCache { - public: - explicit MetadataCache(bool uses_gpu); - - public: - /*! \brief Load the associated metadata for the specified memory block. */ - Metadata load(const MemoryBlock* memory_block); - - /*! \brief Store the associated metadata for the specified memory block. */ - void store(MemoryBlock* memory_block, const Metadata& meta_data); - - /*! \brief Indicate that the specified metadata will no longer be used. 
*/ - void invalidate(MemoryBlock* memory_block); - - public: - MetadataCache(const MetadataCache&) = delete; - MetadataCache& operator=(const MetadataCache&) = delete; - - private: - bool uses_gpu_; - - private: - typedef std::unordered_map MetadataMap; - - private: - MetadataMap cache_; -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h deleted file mode 100644 index 14895ee8727e98186b1f1295321951e12753fef6..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/detail/meta_data.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -class Metadata { - public: - Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, - MemoryBlock* r); - Metadata(); - - public: - /*! \brief Update the guards when metadata is changed */ - void update_guards(); - - /*! \brief Check consistency to previous modification */ - bool check_guards() const; - - public: - // TODO(gangliao): compress this - // clang-format off - size_t guard_begin = 0; - MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; - size_t index = 0; - size_t size = 0; - size_t total_size = 0; - MemoryBlock* left_buddy = nullptr; - MemoryBlock* right_buddy = nullptr; - size_t guard_end = 0; - // clang-format on -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 71d28dcbade1bcb48d2e906c61e03236860cb7d0..d5390529163491c2711e50ffad236534e88b73ee 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -13,15 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/memory/detail/system_allocator.h" -#include "paddle/fluid/platform/assert.h" -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include // for malloc and free #include // for mlock and munlock #include // for std::max #include "gflags/gflags.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" // If use_pinned_memory is true, CPUAllocator calls mlock, which // returns pinned and locked memory as staging areas for data exchange @@ -34,13 +35,13 @@ namespace paddle { namespace memory { namespace detail { -void* CPUAllocator::Alloc(size_t& index, size_t size) { +void* CPUAllocator::Alloc(size_t* index, size_t size) { // According to http://www.cplusplus.com/reference/cstdlib/malloc/, // malloc might not return nullptr if size is zero, but the returned // pointer shall not be dereferenced -- so we make it nullptr. if (size <= 0) return nullptr; - index = 0; // unlock memory + *index = 0; // unlock memory void* p; @@ -55,7 +56,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) { if (p != nullptr) { if (FLAGS_use_pinned_memory) { - index = 1; + *index = 1; mlock(p, size); // lock memory } } @@ -74,14 +75,25 @@ bool CPUAllocator::UseGpu() const { return false; } #ifdef PADDLE_WITH_CUDA -void* GPUAllocator::Alloc(size_t& index, size_t size) { +void* GPUAllocator::Alloc(size_t* index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr // if size is 0. We just make sure it does. if (size <= 0) return nullptr; void* p; + int prev_id; + cudaGetDevice(&prev_id); + if (prev_id != gpu_id_) { + cudaSetDevice(gpu_id_); + } + cudaError_t result = cudaMalloc(&p, size); + + if (prev_id != gpu_id_) { + cudaSetDevice(prev_id); + } + if (result == cudaSuccess) { - index = 0; + *index = 0; gpu_alloc_size_ += size; return p; } else { @@ -121,23 +133,33 @@ bool GPUAllocator::UseGpu() const { return true; } // PINNED memory allows direct DMA transfers by the GPU to and from system // memory. It’s locked to a physical address. -void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { +void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) { if (size <= 0) return nullptr; - void* p; - // NOTE: here, we use GpuMaxAllocSize() as the maximum memory size + + // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size // of host pinned allocation. Allocates too much would reduce // the amount of memory available to the underlying system for paging. + size_t usable = + paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_; - size_t usable = paddle::platform::GpuMaxAllocSize() - fallback_alloc_size_; - - if (size > usable) return nullptr; + if (size > usable) { + LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0 + << " MB pinned memory." + << ", available " << usable / 1024.0 / 1024.0 << " MB"; + return nullptr; + } + void* p; // PINNED memory is visible to all CUDA contexts. 
cudaError_t result = cudaMallocHost(&p, size); + if (result == cudaSuccess) { - index = 1; - fallback_alloc_size_ += size; + *index = 1; // PINNED memory + cuda_pinnd_alloc_size_ += size; return p; + } else { + LOG(WARNING) << "cudaMallocHost failed."; + return nullptr; } return nullptr; @@ -147,8 +169,8 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { cudaError_t err; PADDLE_ASSERT(index == 1); - PADDLE_ASSERT(fallback_alloc_size_ >= size); - fallback_alloc_size_ -= size; + PADDLE_ASSERT(cuda_pinnd_alloc_size_ >= size); + cuda_pinnd_alloc_size_ -= size; err = cudaFreeHost(p); // Purposefully allow cudaErrorCudartUnloading, because @@ -161,7 +183,7 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { } } -bool CUDAPinnedAllocator::UseGpu() const { return true; } +bool CUDAPinnedAllocator::UseGpu() const { return false; } #endif diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h index 3e024125fabb8bbff094ed4455f164dfd4cae163..a0386a2dad1bb7faf54197a47ca7a5b6d9488817 100644 --- a/paddle/fluid/memory/detail/system_allocator.h +++ b/paddle/fluid/memory/detail/system_allocator.h @@ -21,21 +21,22 @@ namespace memory { namespace detail { /** - * \brief SystemAllocator is the parent class of CPUAllocator and GPUAllocator. - * A BuddyAllocator object uses a SystemAllocator* pointing to the + * \brief SystemAllocator is the parent class of CPUAllocator, + * CUDAPinnedAllocator and GPUAllocator. A BuddyAllocator + * object uses a SystemAllocator* pointing to the * underlying system allocator. */ class SystemAllocator { public: virtual ~SystemAllocator() {} - virtual void* Alloc(size_t& index, size_t size) = 0; + virtual void* Alloc(size_t* index, size_t size) = 0; virtual void Free(void* p, size_t size, size_t index) = 0; virtual bool UseGpu() const = 0; }; class CPUAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; }; @@ -43,25 +44,26 @@ class CPUAllocator : public SystemAllocator { #ifdef PADDLE_WITH_CUDA class GPUAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {} + + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; private: size_t gpu_alloc_size_ = 0; size_t fallback_alloc_size_ = 0; + int gpu_id_; }; class CUDAPinnedAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; private: - size_t gpu_alloc_size_ = - 0; // TODO(zcd): how to define the upper limit of CUDAPinnedMemory? - size_t fallback_alloc_size_ = 0; + size_t cuda_pinnd_alloc_size_ = 0; }; #endif diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc index d5df9e6897e9e788f14d2625e424c13949eeaa26..268260142c579ea9301d89fcec1613ce5b0e15a5 100644 --- a/paddle/fluid/memory/detail/system_allocator_test.cc +++ b/paddle/fluid/memory/detail/system_allocator_test.cc @@ -22,11 +22,11 @@ limitations under the License. 
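[Reviewer note] CUDAPinnedAllocator now budgets against platform::CUDAPinnedMaxAllocSize() instead of the GPU memory limit, because pinned (page-locked) host memory comes out of the pool the OS uses for paging, and over-allocating it degrades the whole machine. The bookkeeping in isolation (the 256 MB ceiling below is an arbitrary placeholder for CUDAPinnedMaxAllocSize()):

    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kPinnedLimit = 256ULL << 20;  // placeholder ceiling
    std::size_t g_pinned_in_use = 0;  // cuda_pinnd_alloc_size_ in the PR

    // Reserve the bytes if the request fits the budget; otherwise log
    // roughly the warning the PR emits and refuse.
    bool ReservePinned(std::size_t size) {
      std::size_t usable = kPinnedLimit - g_pinned_in_use;
      if (size > usable) {
        std::fprintf(stderr,
                     "Cannot malloc %.2f MB pinned memory, available %.2f MB\n",
                     size / 1048576.0, usable / 1048576.0);
        return false;
      }
      g_pinned_in_use += size;
      return true;
    }

The flipped UseGpu() return (true to false) also matters downstream: pinned blocks are host memory, so block metadata can be kept in the block itself rather than in the GPU-side cache.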
*/ DECLARE_bool(use_pinned_memory); -void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { +void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { bool freed = false; { size_t index; - void* p = a.Alloc(index, size); + void* p = a->Alloc(&index, size); if (size > 0) { EXPECT_NE(p, nullptr); } else { @@ -36,7 +36,7 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { int* i = static_cast(p); std::shared_ptr ptr(i, [&](void* p) { freed = true; - a.Free(p, size, index); + a->Free(p, size, index); }); } EXPECT_TRUE(freed); @@ -45,21 +45,21 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { TEST(CPUAllocator, NoLockMem) { FLAGS_use_pinned_memory = false; paddle::memory::detail::CPUAllocator a; - TestAllocator(a, 2048); - TestAllocator(a, 0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } TEST(CPUAllocator, LockMem) { FLAGS_use_pinned_memory = true; paddle::memory::detail::CPUAllocator a; - TestAllocator(a, 2048); - TestAllocator(a, 0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #ifdef PADDLE_WITH_CUDA TEST(GPUAllocator, Alloc) { - paddle::memory::detail::GPUAllocator a; - TestAllocator(a, 2048); - TestAllocator(a, 0); + paddle::memory::detail::GPUAllocator a(0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #endif diff --git a/paddle/fluid/memory/memory.cc b/paddle/fluid/memory/malloc.cc similarity index 69% rename from paddle/fluid/memory/memory.cc rename to paddle/fluid/memory/malloc.cc index f2d5f250bfb56fb522416e83ab4c5315a9f533f0..0c74f62de5c6f5d432ee928945db6dcf385ca209 100644 --- a/paddle/fluid/memory/memory.cc +++ b/paddle/fluid/memory/malloc.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
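[Reviewer note] The reworked TestAllocator takes the allocator by pointer (the same style-guide change as Alloc) and verifies deallocation through a std::shared_ptr whose custom deleter flips a flag. That ownership trick in isolation, with plain new/delete standing in for the allocator under test:

    #include <cassert>
    #include <memory>

    int main() {
      bool freed = false;
      {
        int* raw = new int(42);
        std::shared_ptr<int> guard(raw, [&](int* p) {
          freed = true;  // observed by the enclosing scope
          delete p;      // in the real test: a->Free(p, size, index)
        });
      }
      assert(freed);  // the deleter must have run exactly once
    }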
*/ -#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/memory/malloc.h" #include "glog/logging.h" @@ -38,8 +38,7 @@ BuddyAllocator* GetCPUBuddyAllocator() { } template <> -void* Alloc(platform::CPUPlace place, size_t size, - bool is_pinned) { +void* Alloc(platform::CPUPlace place, size_t size) { VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); void* p = GetCPUBuddyAllocator()->Alloc(size); VLOG(10) << " pointer=" << p; @@ -47,8 +46,7 @@ void* Alloc(platform::CPUPlace place, size_t size, } template <> -void Free(platform::CPUPlace place, void* p, - bool is_pinned) { +void Free(platform::CPUPlace place, void* p) { VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); GetCPUBuddyAllocator()->Free(p); } @@ -71,31 +69,7 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { } platform::SetDeviceId(gpu_id); if (!as[gpu_id]) { - as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize()); - VLOG(10) << "\n\nNOTE: each GPU device use " - << FLAGS_fraction_of_gpu_memory_to_use * 100 - << "% of GPU memory.\n" - << "You can set GFlags environment variable '" - << "FLAGS_fraction_of_gpu_memory_to_use" - << "' to change the fraction of GPU usage.\n\n"; - } - return as[gpu_id]; -} - -BuddyAllocator* GetCUDAPinnedBuddyAllocator(int gpu_id) { - static BuddyAllocator** as = NULL; - if (as == NULL) { - int gpu_num = platform::GetCUDADeviceCount(); - as = new BuddyAllocator*[gpu_num]; - for (int gpu = 0; gpu < gpu_num; gpu++) { - as[gpu] = nullptr; - } - } - platform::SetDeviceId(gpu_id); - if (!as[gpu_id]) { - as[gpu_id] = new BuddyAllocator(new detail::CUDAPinnedAllocator, + as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator(gpu_id), platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()); VLOG(10) << "\n\nNOTE: each GPU device use " @@ -114,22 +88,14 @@ size_t Used(platform::CUDAPlace place) { } template <> -void* Alloc(platform::CUDAPlace place, size_t size, - bool is_pinned) { - void* ptr; - if (is_pinned) { - auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(place.device); - ptr = buddy_allocator->Alloc(size); - } else { - auto* buddy_allocator = GetGPUBuddyAllocator(place.device); - ptr = buddy_allocator->Alloc(size); - } - +void* Alloc(platform::CUDAPlace place, size_t size) { + auto* buddy_allocator = GetGPUBuddyAllocator(place.device); + auto* ptr = buddy_allocator->Alloc(size); if (ptr == nullptr) { int cur_dev = platform::GetCurrentDeviceId(); platform::SetDeviceId(place.device); size_t avail, total; - platform::GpuMemoryUsage(avail, total); + platform::GpuMemoryUsage(&avail, &total); LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU " << place.device << ", available " << avail << " bytes"; LOG(WARNING) << "total " << total; @@ -142,15 +108,42 @@ void* Alloc(platform::CUDAPlace place, size_t size, } template <> -void Free(platform::CUDAPlace place, void* p, - bool is_pinned) { - if (is_pinned) { - GetCUDAPinnedBuddyAllocator(place.device)->Free(p); - } else { - GetGPUBuddyAllocator(place.device)->Free(p); +void Free(platform::CUDAPlace place, void* p) { + GetGPUBuddyAllocator(place.device)->Free(p); +} + +BuddyAllocator* GetCUDAPinnedBuddyAllocator() { + static BuddyAllocator* ba = NULL; + if (ba == NULL) { + ba = new BuddyAllocator(new detail::CUDAPinnedAllocator, + platform::CUDAPinnedMinChunkSize(), + platform::CUDAPinnedMaxChunkSize()); } + return ba; } +template <> +size_t Used(platform::CUDAPinnedPlace place) { + return GetCUDAPinnedBuddyAllocator()->Used(); 
+} + +template <> +void* Alloc(platform::CUDAPinnedPlace place, + size_t size) { + auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(); + void* ptr = buddy_allocator->Alloc(size); + + if (ptr == nullptr) { + LOG(WARNING) << "cudaMallocHost Cannot allocate " << size + << " bytes in CUDAPinnedPlace"; + } + return ptr; +} + +template <> +void Free(platform::CUDAPinnedPlace place, void* p) { + GetCUDAPinnedBuddyAllocator()->Free(p); +} #endif size_t Usage::operator()(const platform::CPUPlace& cpu) const { @@ -165,6 +158,14 @@ size_t Usage::operator()(const platform::CUDAPlace& gpu) const { #endif } +size_t Usage::operator()(const platform::CUDAPinnedPlace& cuda_pinned) const { +#ifdef PADDLE_WITH_CUDA + return Used(cuda_pinned); +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} + size_t memory_usage(const platform::Place& p) { return boost::apply_visitor(Usage(), p); } diff --git a/paddle/fluid/memory/malloc.h b/paddle/fluid/memory/malloc.h new file mode 100644 index 0000000000000000000000000000000000000000..3e6bfddd69cb16edf323d040ea5369cd551f299e --- /dev/null +++ b/paddle/fluid/memory/malloc.h @@ -0,0 +1,104 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace memory { + +/** + * \brief Allocate memory block in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] size Allocation size. + * + * \return Allocated memory block address. + * + * \note If return nullptr, it indicates memory allocation failed + * because insufficient memory in current system. When Alloc + * function is invoked, you must check the returned memory + * address is valid or not. + */ +template +void* Alloc(Place place, size_t size); + +/** + * \brief Free memory block in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] ptr Memory block address to free. + * + */ +template +void Free(Place place, void* ptr); + +/** + * \brief Total size of used memory in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * + */ +template +size_t Used(Place place); + +struct Usage : public boost::static_visitor { + size_t operator()(const platform::CPUPlace& cpu) const; + size_t operator()(const platform::CUDAPlace& gpu) const; + size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const; +}; + +size_t memory_usage(const platform::Place& p); + +/** + * \brief Free memory block in one place. + * + * \note In some cases, custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. 
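[Reviewer note] memory_usage dispatches on the runtime place type by applying the Usage visitor through boost::apply_visitor; the PR's addition is the third overload for CUDAPinnedPlace. The same dispatch expressed with std::variant/std::visit as a rough analogue (Paddle itself uses boost::variant, and the byte counts below are fake stand-ins for the Used(place) calls):

    #include <cstddef>
    #include <variant>

    struct CPUPlace {};
    struct CUDAPlace { int device = 0; };
    struct CUDAPinnedPlace {};
    using Place = std::variant<CPUPlace, CUDAPlace, CUDAPinnedPlace>;

    // One overload per place type, including the new CUDAPinnedPlace case.
    struct Usage {
      std::size_t operator()(const CPUPlace&) const { return 100; }
      std::size_t operator()(const CUDAPlace&) const { return 200; }
      std::size_t operator()(const CUDAPinnedPlace&) const { return 50; }
    };

    std::size_t memory_usage(const Place& p) { return std::visit(Usage{}, p); }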
+ * + */ +template +class PODDeleter { + static_assert(std::is_pod::value, "T must be POD"); + + public: + explicit PODDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, static_cast(ptr)); } + + private: + Place place_; +}; + +/** + * \brief Free memory block in one place does not meet POD + * + * \note In some cases, custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. + * + */ +template +class PlainDeleter { + public: + explicit PlainDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } + + private: + Place place_; +}; + +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/malloc_test.cc similarity index 68% rename from paddle/fluid/memory/memory_test.cc rename to paddle/fluid/memory/malloc_test.cc index eb27a52b254c1cda065197746eb179bbd1d7f2f1..d39466ef60c3750600dea726a6570397423d42f6 100644 --- a/paddle/fluid/memory/memory_test.cc +++ b/paddle/fluid/memory/malloc_test.cc @@ -12,23 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/memory/memory.h" -#include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" +#include "paddle/fluid/memory/malloc.h" + +#include +#include "gtest/gtest.h" +#include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" -#include -#include - inline bool is_aligned(void const *p) { return 0 == (reinterpret_cast(p) & 0x3); } size_t align(size_t size, paddle::platform::CPUPlace place) { - size += sizeof(paddle::memory::detail::Metadata); + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); size_t alignment = paddle::platform::CpuMinChunkSize(); size_t remaining = size % alignment; return remaining == 0 ? size : size + (alignment - remaining); @@ -86,7 +85,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { #ifdef PADDLE_WITH_CUDA size_t align(size_t size, paddle::platform::CUDAPlace place) { - size += sizeof(paddle::memory::detail::Metadata); + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); size_t alignment = paddle::platform::GpuMinChunkSize(); size_t remaining = size % alignment; return remaining == 0 ? size : size + (alignment - remaining); @@ -141,4 +140,59 @@ TEST(BuddyAllocator, GPUMultAlloc) { } } +size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) { + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); + size_t alignment = paddle::platform::CUDAPinnedMinChunkSize(); + size_t remaining = size % alignment; + return remaining == 0 ? 
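[Reviewer note] With pinnedness now encoded in the place type, PODDeleter loses its is_pinned flag and simply forwards to Free(place, ptr). Typical use is as the deleter of a std::unique_ptr so the allocator reclaims the block when the pointer dies; a self-contained sketch with local stand-ins for paddle::memory::Alloc/Free:

    #include <cstddef>
    #include <memory>
    #include <type_traits>

    struct CPUPlace {};  // stand-in for platform::CPUPlace

    // Local stand-ins for paddle::memory::Alloc/Free.
    void* Alloc(CPUPlace, std::size_t size) { return ::operator new(size); }
    void Free(CPUPlace, void* p) { ::operator delete(p); }

    template <typename T, typename Place>
    class PODDeleter {
      static_assert(std::is_pod<T>::value, "T must be POD");

     public:
      explicit PODDeleter(Place place) : place_(place) {}
      void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr)); }

     private:
      Place place_;
    };

    int main() {
      CPUPlace cpu;
      std::unique_ptr<float, PODDeleter<float, CPUPlace>> buf(
          static_cast<float*>(Alloc(cpu, 256 * sizeof(float))),
          PODDeleter<float, CPUPlace>(cpu));
      // buf behaves like a normal smart pointer; Free(cpu, ...) runs
      // automatically when it goes out of scope.
    }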
size : size + (alignment - remaining); +} + +TEST(BuddyAllocator, CUDAPinnedAllocator) { + void *p = nullptr; + + EXPECT_EQ(p, nullptr); + + paddle::platform::CUDAPinnedPlace cpu; + p = paddle::memory::Alloc(cpu, 4096); + + EXPECT_NE(p, nullptr); + + paddle::platform::Place place = cpu; + EXPECT_EQ(paddle::memory::Used(cpu), paddle::memory::memory_usage(place)); + + paddle::memory::Free(cpu, p); +} + +TEST(BuddyAllocator, CUDAPinnedMultAllocator) { + paddle::platform::CUDAPinnedPlace cpu; + + std::unordered_map ps; + + size_t total_size = paddle::memory::Used(cpu); + EXPECT_EQ(total_size, 0UL); + + for (auto size : + {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { + ps[paddle::memory::Alloc(cpu, size)] = size; + + // Buddy Allocator doesn't manage too large memory chunk + if (paddle::memory::Used(cpu) == total_size) continue; + + size_t aligned_size = align(size, cpu); + total_size += aligned_size; + EXPECT_EQ(total_size, paddle::memory::Used(cpu)); + } + + for (auto p : ps) { + EXPECT_EQ(is_aligned(p.first), true); + paddle::memory::Free(cpu, p.first); + + // Buddy Allocator doesn't manage too large memory chunk + if (paddle::memory::Used(cpu) == total_size) continue; + + size_t aligned_size = align(p.second, cpu); + total_size -= aligned_size; + EXPECT_EQ(total_size, paddle::memory::Used(cpu)); + } +} #endif diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc index b991360d0442ec2d258443a931a9dcf10b332f1e..eddcaab8befda84dd14ed46c31ac025dfbcc7ca9 100644 --- a/paddle/fluid/memory/memcpy.cc +++ b/paddle/fluid/memory/memcpy.cc @@ -56,6 +56,45 @@ void Copy( } } +template <> +void Copy( + platform::CPUPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CPUPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CUDAPlace src_place, const void* src, size_t num, + cudaStream_t stream) { + platform::SetDeviceId(src_place.device); + platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream); +} + +template <> +void Copy( + platform::CUDAPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num, + cudaStream_t stream) { + platform::SetDeviceId(dst_place.device); + platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream); +} + #endif } // namespace memory diff --git a/paddle/fluid/memory/memory.h b/paddle/fluid/memory/memory.h index 062bfc880e78dc5d90c567ffe5c4e521704c9ca6..8d904e3be56abf0974ba7379f7ca1b676fcb0409 100644 --- a/paddle/fluid/memory/memory.h +++ b/paddle/fluid/memory/memory.h @@ -14,92 +14,5 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/platform/place.h" - -namespace paddle { -namespace memory { - -/** - * \brief Allocate memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] size Allocation size. - * - * \return Allocated memory block address. - * - * \note If return nullptr, it indicates memory allocation failed - * because insufficient memory in current system. 
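[Reviewer note] Each align() overload in the tests mirrors what BuddyAllocator actually hands out: the request grows by sizeof(MemoryBlock::Desc) for the in-block header, then rounds up to the place's minimum chunk size. A worked check (the 64-byte descriptor and 4 KB chunk are illustrative constants, not the real platform values):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t kDescSize = 64;    // stand-in for sizeof(Desc)
    constexpr std::size_t kMinChunk = 4096;  // stand-in for MinChunkSize()

    std::size_t Align(std::size_t size) {
      size += kDescSize;
      std::size_t remaining = size % kMinChunk;
      return remaining == 0 ? size : size + (kMinChunk - remaining);
    }

    int main() {
      assert(Align(4096 - kDescSize) == 4096);  // exact fit with the header
      assert(Align(4096) == 8192);              // header pushes it over
    }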
When Alloc - * function is invoked, you must check the returned memory - * address is valid or not. - */ -template -void* Alloc(Place place, size_t size, bool is_pinned = false); - -/** - * \brief Free memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] ptr Memory block address to free. - * - */ -template -void Free(Place place, void* ptr, bool is_pinned = false); - -/** - * \brief Total size of used memory in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * - */ -template -size_t Used(Place place); - -struct Usage : public boost::static_visitor { - size_t operator()(const platform::CPUPlace& cpu) const; - size_t operator()(const platform::CUDAPlace& gpu) const; -}; - -size_t memory_usage(const platform::Place& p); - -/** - * \brief Free memory block in one place. - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PODDeleter { - static_assert(std::is_pod::value, "T must be POD"); - - public: - explicit PODDeleter(Place place, bool is_pinned = false) - : place_(place), is_pinned_(is_pinned) {} - void operator()(T* ptr) { Free(place_, static_cast(ptr), is_pinned_); } - - private: - Place place_; - bool is_pinned_; -}; - -/** - * \brief Free memory block in one place does not meet POD - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PlainDeleter { - public: - explicit PlainDeleter(Place place) : place_(place) {} - void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } - - private: - Place place_; -}; - -} // namespace memory -} // namespace paddle +#include "paddle/fluid/memory/malloc.h" +#include "paddle/fluid/memory/memcpy.h" diff --git a/paddle/fluid/memory/pinned_memory_test.cu b/paddle/fluid/memory/pinned_memory_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..0d898f59ee1b8c783c5357aa7e27581a993a6d30 --- /dev/null +++ b/paddle/fluid/memory/pinned_memory_test.cu @@ -0,0 +1,146 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include +#include + +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/memory/memory.h" + +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/place.h" + +// This unit test is an example comparing the performance between using pinned +// memory and not. In general, using pinned memory will be faster. 
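[Reviewer note] The new Copy specializations encode the transfer rules for CUDAPinnedPlace: pinned<->CPU and pinned<->pinned are both host memory, so they reduce to std::memcpy, while pinned<->GPU goes through an asynchronous copy on a caller-supplied stream (Paddle wraps cudaMemcpyAsync in platform::GpuMemcpyAsync and selects the GPU-side device first). The same rules with the raw CUDA runtime, as a sketch:

    #include <cstddef>
    #include <cstring>
    #include <cuda_runtime.h>

    // Pinned -> device: asynchronous, and able to overlap with compute on
    // other streams precisely because the host side is page-locked.
    void CopyPinnedToDevice(void* dst_dev, const void* src_pinned,
                            std::size_t n, cudaStream_t stream) {
      cudaMemcpyAsync(dst_dev, src_pinned, n, cudaMemcpyHostToDevice, stream);
    }

    // Pinned <-> pageable CPU, or pinned <-> pinned: plain host memcpy.
    void CopyPinnedToPinned(void* dst, const void* src, std::size_t n) {
      std::memcpy(dst, src, n);
    }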
+template +__global__ void Kernel(T* output, int dim) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < dim) { + output[tid] = output[tid] * output[tid] / 100; + } +} + +template +float test_pinned_memory() { + Place cpu_place; + paddle::platform::CUDAPlace cuda_place; + + const int data_size = 4096; + const int iteration = 10; + + // create event start and end + cudaEvent_t start_e, stop_e, copying_e; + float elapsedTime = 0; + cudaEventCreate(&start_e); + cudaEventCreate(&stop_e); + cudaEventCreate(©ing_e); + + // create computation stream, data copying stream + cudaStream_t computation_stream, copying_stream; + cudaStreamCreate(&computation_stream); + cudaStreamCreate(©ing_stream); + + // create record event, pinned memory, gpu memory + std::vector record_event(iteration); + std::vector input_pinned_mem(iteration); + std::vector gpu_mem(iteration); + std::vector output_pinned_mem(iteration); + + // initial data + for (int j = 0; j < iteration; ++j) { + cudaEventCreateWithFlags(&record_event[j], cudaEventDisableTiming); + cudaEventCreate(&(record_event[j])); + input_pinned_mem[j] = static_cast( + paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); + output_pinned_mem[j] = static_cast( + paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); + gpu_mem[j] = static_cast( + paddle::memory::Alloc(cuda_place, data_size * sizeof(float))); + + for (int k = 0; k < data_size; ++k) { + input_pinned_mem[j][k] = k; + } + } + + cudaEventRecord(start_e, computation_stream); + + // computation + for (int m = 0; m < 30; ++m) { + for (int i = 0; i < iteration; ++i) { + // cpu -> GPU on computation stream. + // note: this operation is async for pinned memory. + paddle::memory::Copy(cuda_place, gpu_mem[i], cpu_place, + input_pinned_mem[i], data_size * sizeof(float), + computation_stream); + + // call kernel on computation stream. + Kernel<<<4, 1024, 0, computation_stream>>>(gpu_mem[i], data_size); + + // record event_computation on computation stream + cudaEventRecord(record_event[i], computation_stream); + + // wait event_computation on copy stream. + // note: this operation is async. + cudaStreamWaitEvent(copying_stream, record_event[i], 0); + + // copy data GPU->CPU, on copy stream. + // note: this operation is async for pinned memory. 
+ paddle::memory::Copy(cpu_place, output_pinned_mem[i], cuda_place, + gpu_mem[i], data_size * sizeof(float), + copying_stream); + } + } + + cudaEventRecord(copying_e, copying_stream); + cudaStreamWaitEvent(computation_stream, copying_e, 0); + + cudaEventRecord(stop_e, computation_stream); + + cudaEventSynchronize(start_e); + cudaEventSynchronize(stop_e); + cudaEventElapsedTime(&elapsedTime, start_e, stop_e); + + // std::cout << cpu_place << " " + // << "time consume:" << elapsedTime / 30 << std::endl; + + for (int l = 0; l < iteration; ++l) { + for (int k = 0; k < data_size; ++k) { + float temp = input_pinned_mem[l][k]; + temp = temp * temp / 100; + EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]); + } + } + + // destroy resource + cudaEventDestroy(copying_e); + cudaEventDestroy(start_e); + cudaEventDestroy(stop_e); + for (int j = 0; j < 10; ++j) { + cudaEventDestroy((record_event[j])); + paddle::memory::Free(cpu_place, input_pinned_mem[j]); + paddle::memory::Free(cpu_place, output_pinned_mem[j]); + paddle::memory::Free(cuda_place, gpu_mem[j]); + } + return elapsedTime / 30; +} + +TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) { + // Generally speaking, operation on pinned_memory is faster than that on + // unpinned-memory, but if this unit test fails frequently, please close this + // test for the time being. + float time1 = test_pinned_memory(); + float time2 = test_pinned_memory(); + EXPECT_GT(time1, time2); +} diff --git a/paddle/fluid/operators/.clang-format b/paddle/fluid/operators/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 8341170d6897d71ddf95d4de95f521f5d31ab7cd..7d6781c2c38822eaabb64eda9c76ff657bbdeeb8 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -3,8 +3,8 @@ string(REPLACE "_mkldnn" "" GENERAL_OPS "${GENERAL_OPS}") string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}") list(REMOVE_DUPLICATES GENERAL_OPS) set(DEPS_OPS "") -set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/pybind.h) -file(WRITE ${pybind_file} "// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT!\n\n") +set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h) +file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt. DO NOT EDIT!\n\n") function(op_library TARGET) # op_library is a function to create op library. The interface is same as # cc_library. But it handle split GPU/CPU code and link some common library @@ -100,7 +100,7 @@ function(op_library TARGET) endif() # Define operators that don't need pybind here. 
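[Reviewer note] pinned_memory_test.cu measures exactly the overlap pinned memory enables: the computation stream uploads and runs the kernel, records an event, and a second stream waits on that event before copying results back, so the download of iteration i can overlap the compute of iteration i+1. (Small nit in the test: each record_event is created twice, once via cudaEventCreateWithFlags and again via cudaEventCreate, leaking the first handle.) The core pipeline stripped to its CUDA runtime calls, with the kernel launch elided and the test's per-iteration events replaced by one reused event, which suffices for this strictly linear sketch:

    #include <cstddef>
    #include <cuda_runtime.h>

    void Pipeline(float* h_pinned, float* d_buf, std::size_t n, int iters) {
      cudaStream_t compute, copy;
      cudaStreamCreate(&compute);
      cudaStreamCreate(&copy);
      cudaEvent_t done;
      cudaEventCreateWithFlags(&done, cudaEventDisableTiming);  // ordering only

      for (int i = 0; i < iters; ++i) {
        cudaMemcpyAsync(d_buf, h_pinned, n * sizeof(float),
                        cudaMemcpyHostToDevice, compute);
        // ... kernel launch on `compute` would go here ...
        cudaEventRecord(done, compute);
        cudaStreamWaitEvent(copy, done, 0);  // download waits for compute
        cudaMemcpyAsync(h_pinned, d_buf, n * sizeof(float),
                        cudaMemcpyDeviceToHost, copy);
      }
      cudaStreamSynchronize(copy);
      cudaEventDestroy(done);
      cudaStreamDestroy(compute);
      cudaStreamDestroy(copy);
    }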
- foreach(manual_pybind_op "net_op" "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op") + foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op") if ("${TARGET}" STREQUAL "${manual_pybind_op}") set(pybind_flag 1) endif() @@ -183,6 +183,8 @@ if(WITH_DISTRIBUTE) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") op_library(send_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(send_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + op_library(prefetch_op DEPS ${DISTRIBUTE_DEPS}) + set_source_files_properties(prefetch_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(recv_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(recv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) @@ -191,12 +193,12 @@ if(WITH_DISTRIBUTE) set_source_files_properties(send_vars_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_barrier_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(send_barrier_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS send_op listen_and_serv_op sum_op executor) + set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op listen_and_serv_op sum_op executor) else() - set(DEPS_OPS ${DEPS_OPS} send_op recv_op listen_and_serv_op send_vars_op send_barrier_op) + set(DEPS_OPS ${DEPS_OPS} send_op prefetch_op recv_op listen_and_serv_op send_vars_op send_barrier_op) endif() -op_library(cond_op DEPS framework_proto tensor net_op) op_library(cross_entropy_op DEPS cross_entropy) op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) op_library(softmax_op DEPS softmax) @@ -243,9 +245,17 @@ op_library(channel_send_op DEPS concurrency) op_library(channel_recv_op DEPS concurrency) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) + +# The fully connected layer is deleted when the WITH_MKLDNN flag is OFF +# Because the fully connected layer has only one MKLDNN's operator +if(NOT WITH_MKLDNN) + list(REMOVE_ITEM GENERAL_OPS fc_op) +endif(NOT WITH_MKLDNN) + foreach(src ${GENERAL_OPS}) op_library(${src}) endforeach() + file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n") add_subdirectory(reader) @@ -256,11 +266,10 @@ endforeach() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") cc_test(gather_test SRCS gather_test.cc DEPS tensor) -cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op) -cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory) +cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory) cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc index 
6ff363d766db7dd97e1bc193ef7b4a095a7b7c24..ab7c61227114fe7a0ce2ff2515dd560706058b64 100644 --- a/paddle/fluid/operators/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/activation_mkldnn_op.cc @@ -13,8 +13,8 @@ limitations under the License. */ #include "mkldnn.hpp" -#include "mkldnn_activation_op.h" #include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/operators/mkldnn_activation_op.h" namespace paddle { namespace operators { @@ -40,18 +40,24 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm, const T *dst_data = dst->template mutable_data(ctx.GetPlace()); // get memory dim - PADDLE_ENFORCE(src->dims().size() == 4, - "Input dim must be with 4, i.e. NCHW"); + PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4, + "Input dim must be with 2 or 4"); std::vector src_tz = framework::vectorize2int(src->dims()); // create memory description - // TODO(kbinias-intel): support more formats - auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); + auto data_md = src_tz.size() == 2 + ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nc) + : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nchw); // create memory primitives - auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src_data); - auto dst_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)dst_data); + auto src_memory = + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(src_data))); + auto dst_memory = + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(dst_data))); auto forward_desc = mkldnn::eltwise_forward::desc( mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta); @@ -91,15 +97,21 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm, std::vector src_tz = framework::vectorize2int(x->dims()); // create memory description - auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); + auto data_md = src_tz.size() == 2 + ? 
platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nc) + : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nchw); // create memory primitives - auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src); + auto src_memory = mkldnn::memory( + {data_md, mkldnn_engine}, static_cast(const_cast(src))); auto diff_src_memory = - mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_src); + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(diff_src))); auto diff_dst_memory = - mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_dst); + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(diff_dst))); auto backward_desc = mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta); diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 979115eee0dbe157dbcf2293d914cc250b35d22e..b261144f3d7836801e0b7a45a1478d3b801db86d 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -260,6 +260,36 @@ $out = floor(x)$ } }; +class CosOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CosOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Cosine operator"); + AddOutput("Out", "Output of Cosine operator"); + AddComment(R"DOC( +Cosine Activation Operator. + +$out = cos(x)$ + +)DOC"); + } +}; + +class SinOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SinOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Sine operator"); + AddOutput("Out", "Output of Sine operator"); + AddComment(R"DOC( +Sine Activation Operator. + +$out = sin(x)$ + +)DOC"); + } +}; + class RoundOpMaker : public framework::OpProtoAndCheckerMaker { public: RoundOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -561,6 +591,12 @@ REGISTER_OP(ceil, ops::ActivationOp, ops::CeilOpMaker, ceil_grad, REGISTER_OP(floor, ops::ActivationOp, ops::FloorOpMaker, floor_grad, ops::ActivationOpGrad); +REGISTER_OP(cos, ops::ActivationOp, ops::CosOpMaker, cos_grad, + ops::ActivationOpGrad); + +REGISTER_OP(sin, ops::ActivationOp, ops::SinOpMaker, sin_grad, + ops::ActivationOpGrad); + REGISTER_OP(round, ops::ActivationOp, ops::RoundOpMaker, round_grad, ops::ActivationOpGrad); @@ -626,14 +662,3 @@ REGISTER_OP(swish, ops::ActivationOp, ops::SwishOpMaker, swish_grad, ops::grad_functor>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); - -REGISTER_OP_CPU_KERNEL(relu, - ops::ActivationKernel>, - ops::ActivationKernel>); -REGISTER_OP_CPU_KERNEL( - relu_grad, ops::ActivationGradKernel>, - ops::ActivationGradKernel>); diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index 7709a551dc155e1f3cd2a19a689999608f497beb..4f745553c14fc1391bc65d4f7e4f9bd3b5a881c2 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -1,11 +1,8 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
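[Reviewer note] On the activation_mkldnn_op.cc change above: the MKLDNN activation kernels previously hard-required 4-D NCHW input, and the PR relaxes this to also accept 2-D tensors by picking the memory format from the rank (nc for 2-D, nchw for 4-D). The selection logic by itself, with the MKLDNN descriptor factored out to a stub so the sketch stays library-free:

    #include <cassert>
    #include <stdexcept>
    #include <vector>

    enum class Format { nc, nchw };  // stand-ins for mkldnn::memory::format

    // Mirror of the rank check + format choice added to
    // eltwise_forward/eltwise_grad.
    Format ChooseFormat(const std::vector<int>& dims) {
      if (dims.size() != 2 && dims.size() != 4)
        throw std::invalid_argument("Input dim must be with 2 or 4");
      return dims.size() == 2 ? Format::nc : Format::nchw;
    }

    int main() {
      assert(ChooseFormat({32, 100}) == Format::nc);           // batch x channels
      assert(ChooseFormat({32, 3, 224, 224}) == Format::nchw);
    }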
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,31 +14,19 @@ limitations under the License. */ #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; - -#define REGISTER_ACTIVATION_CUDA_KERNEL(act_type, functor, grad_functor) \ - REGISTER_OP_CUDA_KERNEL( \ - act_type, ops::ActivationKernel>, \ - ops::ActivationKernel>); \ - REGISTER_OP_CUDA_KERNEL( \ - act_type##_grad, \ - ops::ActivationGradKernel>, \ - ops::ActivationGradKernel>, \ + ops::ActivationKernel>, \ + ops::ActivationKernel>); \ + REGISTER_OP_CUDA_KERNEL( \ + act_type##_grad, ops::ActivationGradKernel>, \ + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CUDA_KERNEL); - -REGISTER_OP_CUDA_KERNEL( - relu, ops::ActivationKernel>, - ops::ActivationKernel>, - ops::ActivationKernel>); -REGISTER_OP_CUDA_KERNEL( - relu_grad, ops::ActivationGradKernel>, - ops::ActivationGradKernel>); diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 4c575b4a7b551be2d1288f7fec0a2821fc10c40d..43856780bf9357281ac4af2968950da15426e5c8 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -1,11 +1,8 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,9 +10,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/float16.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" @@ -331,6 +332,68 @@ struct FloorFunctor : public BaseActivationFunctor { } }; +template +struct Sine { + HOSTDEVICE T operator()(const T& val) const { return sin(val); } +}; + +template <> +struct Sine { + HOSTDEVICE platform::float16 operator()(const platform::float16& val) const { + return platform::float16(sin(static_cast(val))); + } +}; + +template +struct Cosine { + HOSTDEVICE T operator()(const T& val) const { return cos(val); } +}; + +template <> +struct Cosine { + HOSTDEVICE platform::float16 operator()(const platform::float16& val) const { + return platform::float16(cos(static_cast(val))); + } +}; + +// cosine'(x) = -sin(x) +template +struct CosGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = -dout * x.unaryExpr(Sine()); + } +}; + +// cosine(x) = cos(x) +template +struct CosFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.unaryExpr(Cosine()); + } +}; + +// sine'(x) = cos(x) +template +struct SinGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * x.unaryExpr(Cosine()); + } +}; + +// sine(x) = sin(x) +template +struct SinFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.unaryExpr(Sine()); + } +}; + // round(x) = [x] template struct RoundFunctor : public BaseActivationFunctor { @@ -776,12 +839,15 @@ struct SwishGradFunctor : public BaseActivationFunctor { __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ __macro(logsigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor); \ __macro(exp, ExpFunctor, ExpGradFunctor); \ + __macro(relu, ReluFunctor, ReluGradFunctor); \ __macro(tanh, TanhFunctor, TanhGradFunctor); \ __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ __macro(abs, AbsFunctor, AbsGradFunctor); \ __macro(ceil, CeilFunctor, ZeroGradFunctor); \ __macro(floor, FloorFunctor, ZeroGradFunctor); \ + __macro(cos, CosFunctor, CosGradFunctor); \ + __macro(sin, SinFunctor, SinGradFunctor); \ __macro(round, RoundFunctor, ZeroGradFunctor); \ __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ __macro(log, LogFunctor, LogGradFunctor); \ diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index c990fe784380bf78a7f3594c0f49ef5e06e6caea..0153e1253b00ded21a7a14e37faf5a76d904d8d1 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/adagrad_op.h" +#include #include diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h index dbcc7abb0996268b5a3571ba113d9cc56f6f65a3..4309f0a5497456065e5c43bc8f7b265fa711f699 100644 --- a/paddle/fluid/operators/array_operator.h +++ b/paddle/fluid/operators/array_operator.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
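[Reviewer note] The new sin/cos activations follow the file's functor pattern: the forward functors apply unaryExpr with Sine/Cosine (with a float16 specialization that round-trips through float), and the gradient functors encode cos'(x) = -sin(x) and sin'(x) = cos(x). A quick finite-difference check of those derivative formulas, independent of Eigen:

    #include <cassert>
    #include <cmath>

    int main() {
      const double x = 0.7, h = 1e-6, tol = 1e-4;

      // cos'(x) == -sin(x), matching CosGradFunctor's dx = -dout * sin(x)
      double cos_fd = (std::cos(x + h) - std::cos(x - h)) / (2 * h);
      assert(std::fabs(cos_fd - (-std::sin(x))) < tol);

      // sin'(x) == cos(x), matching SinGradFunctor's dx = dout * cos(x)
      double sin_fd = (std::sin(x + h) - std::sin(x - h)) / (2 * h);
      assert(std::fabs(sin_fd - std::cos(x)) < tol);
    }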
*/ #pragma once +#include #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc index e8123cb1a490be642d1061bba8129f63e681d3c3..993610fdedde4bafd99f59a0adeeeef4526eb089 100644 --- a/paddle/fluid/operators/assign_value_op.cc +++ b/paddle/fluid/operators/assign_value_op.cc @@ -13,6 +13,8 @@ // limitations under the License. #include "paddle/fluid/operators/assign_value_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h index c7b1a55a5cd52bd2bacbdea3ee22c75c2a2c12d5..e749d6f6d3685f207f0ad4f2ebc7c3c7ae32992c 100644 --- a/paddle/fluid/operators/assign_value_op.h +++ b/paddle/fluid/operators/assign_value_op.h @@ -14,6 +14,7 @@ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/enforce.h" diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc index 71de78b1181daf4bd0b6d73508638857bafcf560..a168eaeab56128b75bbe97d7ccf843a081b5dced 100644 --- a/paddle/fluid/operators/auc_op.cc +++ b/paddle/fluid/operators/auc_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/auc_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/auc_op.h index f4e8208c3f2e238a4acecab4579fc955092d5978..8b016c3d31ad83e66baeb298c61840cc529efa1e 100644 --- a/paddle/fluid/operators/auc_op.h +++ b/paddle/fluid/operators/auc_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -40,7 +42,7 @@ class AucKernel : public framework::OpKernel { std::vector thresholds_list; thresholds_list.reserve(num_thresholds); for (int i = 1; i < num_thresholds - 1; i++) { - thresholds_list[i] = (float)i / (num_thresholds - 1); + thresholds_list[i] = static_cast(i) / (num_thresholds - 1); } const float kEpsilon = 1e-7; thresholds_list[0] = 0.0f - kEpsilon; @@ -105,11 +107,12 @@ class AucKernel : public framework::OpKernel { float* fp_rate_data = fp_rate.mutable_data(ctx.GetPlace()); float* rec_rate_data = rec_rate.mutable_data(ctx.GetPlace()); for (int i = 0; i < num_thresholds; i++) { - tp_rate_data[i] = - ((float)tp_data[i] + epsilon) / (tp_data[i] + fn_data[i] + epsilon); - fp_rate_data[i] = (float)fp_data[i] / (fp_data[i] + tn_data[i] + epsilon); - rec_rate_data[i] = - ((float)tp_data[i] + epsilon) / (tp_data[i] + fp_data[i] + epsilon); + tp_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / + (tp_data[i] + fn_data[i] + epsilon); + fp_rate_data[i] = + static_cast(fp_data[i]) / (fp_data[i] + tn_data[i] + epsilon); + rec_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / + (tp_data[i] + fp_data[i] + epsilon); } *auc_data = 0.0f; if (curve == "ROC") { diff --git a/paddle/fluid/operators/average_accumulates_op.cc b/paddle/fluid/operators/average_accumulates_op.cc index c95077fcbdb6b6c0da31f30b795dbe4d7d4fe6fe..b21deaf9258567c05a8816b14ac7d6462964e8ba 100644 --- a/paddle/fluid/operators/average_accumulates_op.cc +++ b/paddle/fluid/operators/average_accumulates_op.cc @@ -19,15 +19,15 @@ namespace operators { template <> void GetAccumulators( - const framework::ExecutionContext& ctx, int64_t& num_updates_, - int64_t& num_accumulates_, int64_t& old_num_accumulates_) { + const framework::ExecutionContext& ctx, int64_t* num_updates_, + int64_t* num_accumulates_, int64_t* old_num_accumulates_) { auto* in_old_num_accumulates = ctx.Input("in_old_num_accumulates"); auto* in_num_accumulates = ctx.Input("in_num_accumulates"); auto* in_num_updates = ctx.Input("in_num_updates"); - old_num_accumulates_ = in_old_num_accumulates->data()[0]; - num_accumulates_ = in_num_accumulates->data()[0]; - num_updates_ = in_num_updates->data()[0]; + *old_num_accumulates_ = in_old_num_accumulates->data()[0]; + *num_accumulates_ = in_num_accumulates->data()[0]; + *num_updates_ = in_num_updates->data()[0]; } template <> diff --git a/paddle/fluid/operators/average_accumulates_op.cu b/paddle/fluid/operators/average_accumulates_op.cu index 270c46984465e5ca62eaa8da3955ce7a3eaa0c57..104e24f6ee2e2503d98f3a3991a903d8dbc4bdfe 100644 --- a/paddle/fluid/operators/average_accumulates_op.cu +++ b/paddle/fluid/operators/average_accumulates_op.cu @@ -19,18 +19,20 @@ namespace paddle { namespace operators { template <> void GetAccumulators( - const framework::ExecutionContext& ctx, int64_t& num_updates_, - int64_t& num_accumulates_, int64_t& old_num_accumulates_) { + const framework::ExecutionContext& ctx, int64_t* num_updates_, + int64_t* num_accumulates_, int64_t* old_num_accumulates_) { auto* in_old_num_accumulates = ctx.Input("in_old_num_accumulates"); auto* in_num_accumulates = ctx.Input("in_num_accumulates"); auto* in_num_updates = ctx.Input("in_num_updates"); auto stream = ctx.cuda_device_context().stream(); - memory::Copy(platform::CPUPlace(), &old_num_accumulates_, - platform::CUDAPlace(), in_old_num_accumulates->data(), - sizeof(int64_t), stream); - memory::Copy(platform::CPUPlace(), 
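[Reviewer note] The auc_op.h cleanup replaces the C-style casts with static_cast but keeps the math: for each threshold the kernel derives three rates from the confusion-matrix counts, with an epsilon guarding empty denominators. Note that despite its name, rec_rate is tp / (tp + fp), i.e. precision, apparently intended for the precision-recall branch. The formulas extracted into plain code:

    #include <cassert>
    #include <cmath>

    struct Rates { float tpr, fpr, precision; };

    Rates ComputeRates(float tp, float fn, float fp, float tn) {
      const float eps = 1e-7f;  // kEpsilon in the kernel
      return {(tp + eps) / (tp + fn + eps),   // true positive rate (recall)
              fp / (fp + tn + eps),           // false positive rate
              (tp + eps) / (tp + fp + eps)};  // precision ("rec_rate")
    }

    int main() {
      Rates r = ComputeRates(/*tp=*/8, /*fn=*/2, /*fp=*/1, /*tn=*/9);
      assert(std::fabs(r.tpr - 0.8f) < 1e-3f);
      assert(std::fabs(r.fpr - 0.1f) < 1e-3f);
    }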
&num_accumulates_, platform::CUDAPlace(), + auto cuda_place = + boost::get(in_old_num_accumulates->place()); + memory::Copy(platform::CPUPlace(), old_num_accumulates_, cuda_place, + in_old_num_accumulates->data(), sizeof(int64_t), + stream); + memory::Copy(platform::CPUPlace(), num_accumulates_, cuda_place, in_num_accumulates->data(), sizeof(int64_t), stream); - memory::Copy(platform::CPUPlace(), &num_updates_, platform::CUDAPlace(), + memory::Copy(platform::CPUPlace(), num_updates_, cuda_place, in_num_updates->data(), sizeof(int64_t), stream); } @@ -42,14 +44,16 @@ void SetAccumulators( auto* out_old_num_accumulates = ctx.Output("out_old_num_accumulates"); auto* out_num_accumulates = ctx.Output("out_num_accumulates"); auto* out_num_updates = ctx.Output("out_num_updates"); + auto cuda_place = + boost::get(out_old_num_accumulates->place()); - memory::Copy(platform::CUDAPlace(), out_old_num_accumulates->data(), + memory::Copy(cuda_place, out_old_num_accumulates->data(), platform::CPUPlace(), &old_num_accumulates_, sizeof(int64_t), stream); - memory::Copy(platform::CUDAPlace(), out_num_accumulates->data(), + memory::Copy(cuda_place, out_num_accumulates->data(), platform::CPUPlace(), &num_accumulates_, sizeof(int64_t), stream); - memory::Copy(platform::CUDAPlace(), out_num_updates->data(), + memory::Copy(cuda_place, out_num_updates->data(), platform::CPUPlace(), &num_updates_, sizeof(int64_t), stream); } diff --git a/paddle/fluid/operators/average_accumulates_op.h b/paddle/fluid/operators/average_accumulates_op.h index f858109d1428dc67d94c253e5a39818eb2d4560d..07ac5ced11605f6d0d5164d1c0f69acbd7bbed60 100644 --- a/paddle/fluid/operators/average_accumulates_op.h +++ b/paddle/fluid/operators/average_accumulates_op.h @@ -29,8 +29,8 @@ using EigenVector = framework::EigenVector; template void GetAccumulators(const framework::ExecutionContext& ctx, - int64_t& num_updates, int64_t& num_accumulates, - int64_t& old_num_accumulates); + int64_t* num_updates, int64_t* num_accumulates, + int64_t* old_num_accumulates); template void SetAccumulators(const framework::ExecutionContext& ctx, @@ -47,8 +47,8 @@ class AverageAccumulatesKernel : public framework::OpKernel { int64_t num_updates = 0; int64_t num_accumulates = 0; int64_t old_num_accumulates = 0; - GetAccumulators(ctx, num_updates, num_accumulates, - old_num_accumulates); + GetAccumulators(ctx, &num_updates, &num_accumulates, + &old_num_accumulates); // Get attrs float average_window = ctx.Attr("average_window"); diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 36049ee6a4a0d2a251b6d10cf1ff05a9d9845089..c9939e8602ed341d37784ca292a55326899e8e65 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/batch_norm_op.h" +#include #include "paddle/fluid/framework/data_layout.h" namespace paddle { diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 6ceacc39924a7558e380aaf563aaf234f1bf30a5..cb1927bc0f2eb735f0a3184df5f0f8fada2f9dca 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -13,9 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/batch_norm_op.h" -#include "paddle/fluid/framework/data_layout.h" - #include +#include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" @@ -115,23 +114,11 @@ class BatchNormKernel const auto *bias = ctx.Input("Bias"); auto *y = ctx.Output("Y"); - auto *mean_out = ctx.Output("MeanOut"); - auto *variance_out = ctx.Output("VarianceOut"); - auto *saved_mean = ctx.Output("SavedMean"); - auto *saved_variance = ctx.Output("SavedVariance"); // alloc memory y->mutable_data(ctx.GetPlace()); - mean_out->mutable_data>(ctx.GetPlace()); - variance_out->mutable_data>(ctx.GetPlace()); - saved_mean->mutable_data>(ctx.GetPlace()); - saved_variance->mutable_data>(ctx.GetPlace()); auto &dev_ctx = ctx.template device_context(); - math::SetConstant> - functor; - functor(dev_ctx, saved_mean, static_cast>(0)); - functor(dev_ctx, saved_variance, static_cast>(0)); auto handle = dev_ctx.cudnn_handle(); @@ -160,6 +147,21 @@ class BatchNormKernel // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. + + auto *mean_out = ctx.Output("MeanOut"); + auto *variance_out = ctx.Output("VarianceOut"); + mean_out->mutable_data>(ctx.GetPlace()); + variance_out->mutable_data>(ctx.GetPlace()); + + auto *saved_mean = ctx.Output("SavedMean"); + auto *saved_variance = ctx.Output("SavedVariance"); + saved_mean->mutable_data>(ctx.GetPlace()); + saved_variance->mutable_data>(ctx.GetPlace()); + math::SetConstant> + functor; + functor(dev_ctx, saved_mean, static_cast>(0)); + functor(dev_ctx, saved_variance, static_cast>(0)); + double this_factor = 1. - momentum; CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h index 0bdf27e620a3a7c7b62b955f708a5e2aad1a6986..dd51a11fbe6ad5e528197b67536518c4b31fa355 100644 --- a/paddle/fluid/operators/batch_size_like.h +++ b/paddle/fluid/operators/batch_size_like.h @@ -13,7 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/box_coder_op.h b/paddle/fluid/operators/box_coder_op.h index 3c7cac1cd17042994287effc31a918ebd4353c4c..77fc6c2b62af42e6526b889aeef2d9bab795baec 100644 --- a/paddle/fluid/operators/box_coder_op.h +++ b/paddle/fluid/operators/box_coder_op.h @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 86f7046058c7001fcaa588727b1cdc0f3f20c35f..3a6a357e81949014a70e5bae1ee0e1c8b9d0c2ce 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/compare_op.h" +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { @@ -29,6 +30,11 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { AddInput("Y", string::Sprintf( "(LoDTensor) the right hand operand of %s operator", comment.type)); + AddAttr("force_cpu", + "(bool, default false) Force fill output variable to cpu " + "memory. Otherwise, fill output variable to the running " + "device") + .SetDefault(false); AddOutput("Out", string::Sprintf( "(LoDTensor) n-dim bool tensor. Each element is %s", comment.equation)); @@ -75,7 +81,9 @@ class CompareOp : public framework::OperatorWithKernel { const framework::ExecutionContext &ctx) const override { framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx); // CompareOp kernel's device type is decided by input tensor place - kt.place_ = ctx.Input("X")->place(); + bool force_cpu = ctx.Attr("force_cpu"); + kt.place_ = force_cpu ? platform::CPUPlace() + : ctx.Input("X")->place(); return kt; } }; diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 0eedd8ee51ebfff6f553d8e19e97c3a45a95fa6a..4a36b03cb63ac3ea61be1bbc56b8dd0adbe7d334 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/concat_op.h" + +#include #include namespace paddle { @@ -33,7 +35,10 @@ class ConcatOp : public framework::OperatorWithKernel { size_t axis = static_cast(ctx->Attrs().Get("axis")); const size_t n = ins.size(); - PADDLE_ENFORCE_GT(n, 1, "Input tensors count should > 1."); + PADDLE_ENFORCE_GT(n, 0, "Input tensors count should > 0."); + if (n == 1) { + VLOG(3) << "Warning: concat op have only one input, may waste memory"; + } auto out_dims = ins[0]; size_t in_zero_dims_size = out_dims.size(); diff --git a/paddle/fluid/operators/cond_op.cc b/paddle/fluid/operators/cond_op.cc deleted file mode 100644 index 15dce9e3e28fa0200e332534f42752838da4db92..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/cond_op.cc +++ /dev/null @@ -1,235 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/cond_op.h" -#include "paddle/fluid/operators/gather.h" -#include "paddle/fluid/operators/scatter.h" -#include "paddle/fluid/platform/device_context.h" - -namespace paddle { -namespace operators { - -using Scope = framework::Scope; -using Variable = framework::Variable; -using Tensor = framework::Tensor; -using LoDTensor = framework::LoDTensor; -using DDim = framework::DDim; - -framework::Scope& CondOp::AddSubScope(const Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - auto sub_scopes = sub_scopes_var->GetMutable>(); - auto& sub_scope = scope.NewScope(); - sub_scopes->push_back(&sub_scope); - return sub_scope; -} - -std::vector& CondOp::GetSubScopes( - const framework::Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - return *sub_scopes_var->GetMutable>(); -} - -LoDTensor& CondOp::AddIndexTensor(const Scope& scope) const { - auto index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE_NOT_NULL(index_tensors_var, - "Output(IndexTensors) of CondOp should not be null."); - auto& index_tensors = - *index_tensors_var->GetMutable>(); - index_tensors.push_back(LoDTensor()); - return index_tensors.back(); -} - -std::vector& CondOp::GetIndexTensors( - const framework::Scope& scope) const { - auto* index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE_NOT_NULL(index_tensors_var, - "Output(IndexTensors) of CondOp should not be null."); - return *index_tensors_var->GetMutable>(); -} - -void CondOp::PrepareDataForSubnet( - const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const { - PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs(Xs) of CondOp can't be empty."); - - for (int i = 0; i < BRANCH_NUM; ++i) { - // Create two sub scopes for true and false branches - // sub_scopes[0] for the true branch - // sub_scopes[1] for the false branch - AddSubScope(scope); - // Create two tensors for true and false indices: - // index_tensors[0] for the true branch - // index_tensors[1] for the false branch - AddIndexTensor(scope); - } - - Variable* cond_var = scope.FindVar(Input("Cond")); - PADDLE_ENFORCE_NOT_NULL(cond_var, - "Input(Cond) of CondOp should not be null."); - const LoDTensor* cond = cond_var->GetMutable(); - - // get the true/false index at runtime according to cond tensor - // index_vectors[0]: vector, contains all index for cond[i] == true - // index_vectors[1]: vector, contains all index for cond[i] == false - std::vector> index_vectors; - index_vectors.resize(BRANCH_NUM); - - const int* cond_data = cond->data(); - for (int i = 0; i < cond->dims()[0]; ++i) { - if (cond_data[i]) - index_vectors[TRUE_BRANCH].push_back(i); - else - index_vectors[FALSE_BRANCH].push_back(i); - } - - // put index_vectors[0] and index_vectors[1] into two tensors: - // index_tensors[0] and index_tensors[1] - std::vector& index_tensors = GetIndexTensors(scope); - std::vector& sub_scopes = GetSubScopes(scope); - - for (int i = 0; i < BRANCH_NUM; ++i) { - DDim dim = {static_cast(index_vectors[i].size())}; - int* index_tensor_data_ptr = - index_tensors[i].mutable_data(dim, platform::CPUPlace()); - memcpy(index_tensor_data_ptr, index_vectors[i].data(), - dim[0] * sizeof(int)); - } - - // create input in subscopes according to index_vectors - for (auto& input : Inputs("Xs")) { - Variable* var_parent = 
scope.FindVar(input); - PADDLE_ENFORCE_NOT_NULL(var_parent); - const auto* tensor_parent = &var_parent->Get(); - - for (int i = 0; i < BRANCH_NUM; ++i) { - Variable* var_child = sub_scopes[i]->FindVar(input); - PADDLE_ENFORCE_NOT_NULL(var_child); - auto* tensor_child = var_child->GetMutable(); - - // Resize child - DDim dim = tensor_parent->dims(); - dim[0] = index_tensors[i].dims()[0]; - tensor_child->mutable_data(dim, platform::CPUPlace()); - - CPUGather(dev_ctx, *tensor_parent, index_tensors[i], tensor_child); - } - } - - // create output_tensors in subscope for sub_net - for (int i = 0; i < BRANCH_NUM; ++i) { - for (auto& output : (*sub_net_op_[i]).Outputs()) { - for (auto& var_name : output.second) { - sub_scopes[i]->Var(var_name); - } - } - } -} - -void CondOp::MergeDataFromSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const { - std::vector& sub_scopes = GetSubScopes(scope); - const std::vector& index_tensors = - GetIndexTensors(scope); - - // Infer the output dim, out_dim[0] = true_dim[0] + false_dim[0] - PADDLE_ENFORCE(!Outputs("Outs").empty(), - "Outputs(Outs) of CondOp can't be empty."); - for (auto& output : Outputs("Outs")) { - const LoDTensor* tensor_t_out = - &sub_scopes[TRUE_BRANCH]->FindVar(output)->Get(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL"); - const LoDTensor* tensor_f_out = - &sub_scopes[FALSE_BRANCH]->FindVar(output)->Get(); - PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL"); - - auto* var_out = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_out, "Output not found"); - LoDTensor* tensor_out = var_out->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, - "True output tensor should not be NULL"); - - DDim true_dim = tensor_t_out->dims(); - DDim false_dim = tensor_f_out->dims(); - true_dim[0] = 0; - false_dim[0] = 0; - PADDLE_ENFORCE_EQ(true_dim, false_dim, - "Outputs not of the same shape except the first dim"); - - DDim out_dim = tensor_t_out->dims(); - out_dim[0] = tensor_t_out->dims()[0] + tensor_f_out->dims()[0]; - tensor_out->Resize(out_dim); - tensor_out->mutable_data(platform::CPUPlace()); - } - - // merge output results: - // output_tensor = true_output_tensor + false_output_tensor - for (auto& output : Outputs("Outs")) { - Variable* var_parent = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_parent); - auto* tensor_parent = var_parent->GetMutable(); - - for (int i = 0; i < BRANCH_NUM; ++i) { - Variable* var_child = sub_scopes[i]->FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_child); - auto* tensor_child = &var_child->Get(); - ScatterAssign(dev_ctx, *tensor_child, index_tensors[i], - tensor_parent); - } - } -} - -void CondOp::RunImpl(const Scope& scope, const platform::Place& place) const { - // get device context from pool - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& dev_ctx = *pool.Get(place); - - PrepareDataForSubnet(scope, dev_ctx); - std::vector& sub_scopes = GetSubScopes(scope); - for (int i = 0; i < BRANCH_NUM; ++i) { - sub_net_op_[i]->Run(*sub_scopes[i], place); - } - MergeDataFromSubnet(scope, dev_ctx); -} - -class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { - public: - CondOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Cond", "The condition, which is a bool vector"); - AddInput("Xs", "Inputs of Subnets").AsDuplicable(); - AddOutput("Outs", "Outputs of Cond_Op after merge").AsDuplicable(); - - 
AddOutput("SubScopes", "sub scopes for true and false branches"); - AddOutput("IndexTensors", "Index Tensors contains indices for true/false"); - - AddComment(R"DOC( -Sample Dependent Conditional Operator. - -Given Cond[i] as a 1/0 vector to indicate true/false: -Out[i] = subnet_true[i], if Cond[i] == true -Out[i] = subnet_false[i], if Cond[i] == false - -)DOC"); - } -}; - -} // namespace operators -} // namespace paddle - -REGISTER_OP_WITHOUT_GRADIENT(cond, paddle::operators::CondOp, - paddle::operators::CondOpProtoAndCheckerMaker); diff --git a/paddle/fluid/operators/cond_op.h b/paddle/fluid/operators/cond_op.h deleted file mode 100644 index a04fae2182005d4eb08305e943449977bfb637f9..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/cond_op.h +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include "glog/logging.h" -#include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/operators/net_op.h" - -namespace paddle { -namespace operators { - -/* - * @brief CondOp is a dynamic if-else Operator - * - * It has a input tensor named cond indicating which netop each instance will - * run. - * - * if cond == 1, it will run true_net, which is a NetOp. - * - * if cond == 0, it will run false_net, which is another NetOp. - */ -class CondOp : public framework::OperatorBase { - public: - CondOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) { - sub_net_op_.resize(BRANCH_NUM); - } - - CondOp(const CondOp& o) - : framework::OperatorBase( - static_cast(o)) { - // TODO(yuyang18): Implement copy ctor well. 
- PADDLE_THROW("Not implemented"); - } - - framework::Scope& AddSubScope(const framework::Scope& scope) const; - std::vector<framework::Scope*>& GetSubScopes( - const framework::Scope& scope) const; - - framework::LoDTensor& AddIndexTensor(const framework::Scope& scope) const; - std::vector<framework::LoDTensor>& GetIndexTensors( - const framework::Scope& scope) const; - - void PrepareDataForSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const; - void MergeDataFromSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const; - - /* - * Set True Block - */ - void set_truenet(std::unique_ptr<NetOp>&& net) { - sub_net_op_[TRUE_BRANCH] = std::move(net); - } - - /* - * Set False Block - */ - void set_falsenet(std::unique_ptr<NetOp>&& net) { - sub_net_op_[FALSE_BRANCH] = std::move(net); - } - - private: - void RunImpl(const framework::Scope& scope, - const platform::Place& place) const override; - - private: - const int TRUE_BRANCH = 0; - const int FALSE_BRANCH = 1; - const int BRANCH_NUM = 2; - - // sub_net_op_[0]: subnet_t - // sub_net_op_[1]: subnet_f - std::vector<std::unique_ptr<NetOp>> sub_net_op_; -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index 337b34e8f0bf4cb89753235205be9eb058dd01ab..bff2c34ec893d0e6212426b108dd98b0d0d0fb48 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -54,7 +54,18 @@ class ConditionalOp : public framework::OperatorBase { "numel should be 1, actual numel is %d", ips[0]->numel()); } - return ips[0]->data<bool>()[0]; + bool res = false; + if (platform::is_gpu_place(ips[0]->place())) { +#ifdef PADDLE_WITH_CUDA + framework::LoDTensor cpu_tensor; + framework::TensorCopy(*ips[0], platform::CPUPlace(), &cpu_tensor); + platform::DeviceContextPool::Instance().Get(ips[0]->place())->Wait(); + res = cpu_tensor.data<bool>()[0]; +#endif + } else { + res = ips[0]->data<bool>()[0]; + } + return res; } }; diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index a32aba4c1ff2f5e775aeb41f25b02322dbc6a64a..c70e3cc3c9198008d9eca5f462000aa67ff7e5ba 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -128,10 +128,32 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> { handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &algo)); + +#if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) + // Tensor core is supported since the Volta GPU and + // is only enabled when input and filter data are float16 + if (dev_ctx.GetComputeCapability() >= 70 && + std::type_index(typeid(T)) == + std::type_index(typeid(platform::float16))) { + PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_TENSOR_OP_MATH)); + // Currently tensor core is only enabled using this algo + algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; + } else { + PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_DEFAULT_MATH)); + } +#endif + // get workspace size able to allocate PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); + // It is possible for float16 on Volta GPU to allocate more memory than + // the limit because the algo is overridden to use tensor core.
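+ // (Hence the explicit check right below: fail fast via an enforce
+ // rather than silently allocating more than workspace_size_limit.)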
+ PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit, + "workspace_size to be allocated exceeds the limit"); + // Allocate on GPU memory platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 650bc92be22af9ea8afcacf590a11190109e8811..695db841a4ec666b2c8783dfc7df959711341d85 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -13,6 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/conv_op.h" + +#include +#include + #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index b2a3cfc89f18eff24c941c664b1184b4485ab895..08f5939d42a41d235a94eff16cf2f558068d6aaa 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/conv_transpose_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index d4e4b641ece9ed120904ded6f8baed65a2666213..bfc0177c2a0da1627fbca532764fdae8167b6b2a 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/im2col.h" diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h index 2b2a733fb9f162755e5c548fec617937d86689dd..3f5fab3b382bea97f43e4bc1b2cd436c956ba264 100644 --- a/paddle/fluid/operators/crf_decoding_op.h +++ b/paddle/fluid/operators/crf_decoding_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/crop_op.h b/paddle/fluid/operators/crop_op.h index c5ac6849789587f2f41588f79bd538f7b79a7478..f05c2e23284e3a24cf48442996f671ec6084c391 100644 --- a/paddle/fluid/operators/crop_op.h +++ b/paddle/fluid/operators/crop_op.h @@ -13,7 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu index 54e0b1d9ad83c5f01f3f0dfbc2a95c642c0aaadc..bbad74e96d9c6c1be24639b63e472f18a599cfab 100644 --- a/paddle/fluid/operators/ctc_align_op.cu +++ b/paddle/fluid/operators/ctc_align_op.cu @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include #include #include +#include #include "paddle/fluid/operators/ctc_align_op.h" namespace paddle { diff --git a/paddle/fluid/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h index 70698d99589ae9e2e18ec8b1c1bb3bc8c7476131..9c5c6f5aa03632fe3079074d4b164f871fad634d 100644 --- a/paddle/fluid/operators/ctc_align_op.h +++ b/paddle/fluid/operators/ctc_align_op.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index 2b19f0448955d2d7582f23ac133c14ffdf5c9e49..719a7465b8d58ef8588ff1e83c2b971eb6fbb00f 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -2,7 +2,8 @@ if(WITH_DISTRIBUTE) grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") - set_source_files_properties(test_serde.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(serde_test SRCS test_serde.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr + set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr cares zlib protobuf sendrecvop_grpc) + cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor proto_desc lookup_table_op) endif() diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.cc b/paddle/fluid/operators/detail/bytebuffer_stream.cc index 741dd51de9e75feb608161579e56cb160b058ebb..a14171563edb0ac9a22b7ae493c965de3efb7823 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.cc +++ b/paddle/fluid/operators/detail/bytebuffer_stream.cc @@ -17,7 +17,7 @@ limitations under the License. */ // file and did some modifications so that we can send gRPC // requests without too much copying of the tensor data. -#include "bytebuffer_stream.h" +#include "paddle/fluid/operators/detail/bytebuffer_stream.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.h b/paddle/fluid/operators/detail/bytebuffer_stream.h index 1791a48aab1b66147f645c90757b35ef5f6e001b..054dd4ff294414cca55d7e033f2c5403bbb85526 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.h +++ b/paddle/fluid/operators/detail/bytebuffer_stream.h @@ -19,9 +19,11 @@ limitations under the License. 
*/ #pragma once -#include +#include + #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" +#include "grpc++/grpc++.h" namespace grpc { // A ZeroCopyInputStream that reads from grpc_byte_buffer @@ -56,7 +58,7 @@ class GrpcBufferReader final *data = GRPC_SLICE_START_PTR(slice_) + GRPC_SLICE_LENGTH(slice_) - backup_count_; GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX); - *size = (int)backup_count_; + *size = static_cast(backup_count_); backup_count_ = 0; return true; } @@ -68,7 +70,7 @@ class GrpcBufferReader final *data = GRPC_SLICE_START_PTR(slice_); // On win x64, int is only 32bit GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(slice_) <= INT_MAX); - byte_count_ += * size = (int)GRPC_SLICE_LENGTH(slice_); + byte_count_ += * size = static_cast(GRPC_SLICE_LENGTH(slice_)); return true; } diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index e73bbe7537a9b37d358a5aa4a076032b57fca513..661dfa69fe1580ff3890f12defcd124225be0c06 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -12,8 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "grpc_client.h" +#include "paddle/fluid/operators/detail/grpc_client.h" + #include + +#include + #include "paddle/fluid/framework/threadpool.h" namespace paddle { @@ -31,7 +35,8 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, const framework::Scope* p_scope = &scope; const auto ch = GetChannel(ep_val); - framework::Async([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, this] { + framework::AsyncIO([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, + this] { auto* var = p_scope->FindVar(var_name_val); ::grpc::ByteBuffer req; @@ -52,7 +57,7 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_); call->StartCall(); - call->Finish(&s->reply_, &s->status_, (void*)s); + call->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); }); req_count_++; @@ -61,17 +66,15 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, } void ProcGetResponse(const VarHandle& var_h, - // const sendrecv::VariableMessage& ret_msg) { const ::grpc::ByteBuffer& ret_msg) { - framework::Variable* outvar = NULL; - DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, outvar); + framework::Variable* outvar = nullptr; + DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, &outvar); } template void RequestToByteBuffer(const T& proto, ::grpc::ByteBuffer* result) { ::grpc::Slice slice(proto.ByteSizeLong()); - proto.SerializeWithCachedSizesToArray( - const_cast(reinterpret_cast(slice.begin()))); + proto.SerializeWithCachedSizesToArray(const_cast(slice.begin())); ::grpc::ByteBuffer tmp(&slice, 1); result->Swap(&tmp); } @@ -87,11 +90,15 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, const framework::Scope* p_scope = &scope; const auto ch = GetChannel(ep_val); - framework::Async([var_name_val, ep_val, p_scope, p_ctx, time_out, ch, this] { + framework::AsyncIO([var_name_val, ep_val, p_scope, p_ctx, time_out, ch, + this] { + // prepare input sendrecv::VariableMessage req; req.set_varname(var_name_val); + ::grpc::ByteBuffer buf; + RequestToByteBuffer(req, &buf); - // varhandle + // var handle VarHandle var_h; var_h.ep = ep_val; var_h.scope = p_scope; @@ -103,13 
+110,10 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, s->Prepare(var_h, time_out); s->response_call_back_ = ProcGetResponse; - ::grpc::ByteBuffer buf; - RequestToByteBuffer(req, &buf); - auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/GetVariable", buf, &cq_); call->StartCall(); - call->Finish(&s->reply_, &s->status_, (void*)s); + call->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); }); req_count_++; @@ -117,6 +121,49 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, return true; } +bool RPCClient::AsyncPrefetchVariable(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out) { + const platform::DeviceContext* p_ctx = &ctx; + const std::string ep_val = ep; + const std::string in_var_name_val = in_var_name; + const std::string out_var_name_val = out_var_name; + const framework::Scope* p_scope = &scope; + const auto ch = GetChannel(ep_val); + + framework::AsyncIO([in_var_name_val, out_var_name_val, ep_val, p_scope, p_ctx, + time_out, ch, this] { + auto* var = p_scope->FindVar(in_var_name_val); + + ::grpc::ByteBuffer req; + SerializeToByteBuffer(in_var_name_val, var, *p_ctx, &req, out_var_name_val); + + // var handle + VarHandle var_h; + var_h.ep = ep_val; + var_h.scope = p_scope; + var_h.name = out_var_name_val; + var_h.ctx = p_ctx; + + // stub context + GetProcessor* s = new GetProcessor(ch); + s->Prepare(var_h, time_out); + s->response_call_back_ = ProcGetResponse; + + auto call = s->stub_g_.PrepareUnaryCall( + s->context_.get(), "/sendrecv.SendRecvService/PrefetchVariable", req, + &cq_); + call->StartCall(); + call->Finish(&s->reply_, &s->status_, static_cast(s)); + }); + + req_count_++; + return true; +} + void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { const auto ch = GetChannel(ep); @@ -126,7 +173,7 @@ void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { sendrecv::VariableMessage req; req.set_varname(BATCH_BARRIER_MESSAGE); auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); - rpc->Finish(&s->reply_, &s->status_, (void*)s); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); req_count_++; } @@ -138,7 +185,7 @@ void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { sendrecv::VariableMessage req; req.set_varname(FETCH_BARRIER_MESSAGE); auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_); - rpc->Finish(&s->reply_, &s->status_, (void*)s); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); req_count_++; } @@ -151,7 +198,7 @@ bool RPCClient::Wait() { std::vector> waits(req_count_); for (int i = 0; i < req_count_; i++) { - waits[i] = framework::Async([i, &a, this] { a[i] = Proceed(); }); + waits[i] = framework::AsyncIO([i, &a, this] { a[i] = Proceed(); }); } for (int i = 0; i < req_count_; i++) { @@ -204,7 +251,6 @@ std::shared_ptr RPCClient::GetChannel(const std::string& ep) { } grpc::ChannelArguments args; - args.SetInt("grpc.testing.fixed_reconnect_backoff_ms", 5000); args.SetCompressionAlgorithm(GRPC_COMPRESS_NONE); args.SetMaxSendMessageSize(std::numeric_limits::max()); args.SetMaxReceiveMessageSize(std::numeric_limits::max()); diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index 8216ac52fbbb3dcd2f30957cde58a850a77b08d6..4425b19328f503eb7f9022916ed6452cdfea4eeb 100644 --- 
a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -14,10 +14,9 @@ limitations under the License. */ #pragma once -#include -#include #include -#include + +#include // NOLINT #include #include #include @@ -25,11 +24,11 @@ limitations under the License. */ #include #include -#include -#include -#include -#include - +#include "grpc++/generic/generic_stub.h" +#include "grpc++/grpc++.h" +#include "grpc++/support/byte_buffer.h" +#include "grpc++/support/slice.h" +#include "grpc/support/log.h" #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" @@ -172,6 +171,13 @@ class RPCClient { const std::string& var_name, int64_t time_out = 600 * 1000); + bool AsyncPrefetchVariable(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out = 600 * 1000); + void AsyncSendBatchBarrier(const std::string& ep, int64_t time_out = 600 * 1000); diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 9691d1e86b111def5b82e022dd01795aaf5c7b0d..119e146e078e476b2768a8495ea63e468f952fd2 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -14,6 +14,9 @@ limitations under the License. */ #include "paddle/fluid/operators/detail/grpc_server.h" +#include +#include + using ::grpc::ServerAsyncResponseWriter; namespace paddle { @@ -128,6 +131,59 @@ class RequestGet final : public RequestBase { SimpleBlockQueue* queue_; }; +class RequestPrefetch final : public RequestBase { + public: + explicit RequestPrefetch(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, + const platform::DeviceContext* dev_ctx, + framework::Executor* executor, + framework::ProgramDesc* program, + framework::ExecutorPrepareContext* prefetch_ctx) + : RequestBase(service, cq, dev_ctx), + responder_(&ctx_), + scope_(scope), + executor_(executor), + program_(program), + prefetch_ctx_(prefetch_ctx) { + request_.reset(new VariableResponse(scope, dev_ctx_)); + int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); + service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, + cq_, cq_, this); + } + + virtual ~RequestPrefetch() {} + + virtual std::string GetReqName() { return request_->Varname(); } + + virtual void Process() { + // prefetch process... 
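+ // In outline: read the client-requested output variable name from the
+ // message, create that variable in a fresh child scope and initialize
+ // it from the block's VarDesc, run the prepared prefetch block through
+ // the executor, then serialize the result into the gRPC reply below.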
+ ::grpc::ByteBuffer reply; + + std::string var_name = request_->OutVarname(); + VLOG(3) << "prefetch var " << var_name; + auto var_desc = program_->Block(0).FindVar(var_name); + framework::Scope* local_scope = &scope_->NewScope(); + auto* var = local_scope->FindVar(var_name); + InitializeVariable(var, var_desc->GetType()); + executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false); + + SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); + + responder_.Finish(reply, ::grpc::Status::OK, this); + status_ = FINISH; + } + + protected: + std::shared_ptr request_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; + framework::Scope* scope_; + framework::Executor* executor_; + framework::ProgramDesc* program_; + framework::ExecutorPrepareContext* prefetch_ctx_; + int blkid_; +}; + void AsyncGRPCServer::WaitClientGet(int count) { int fetch_barriers = 0; while (fetch_barriers < count) { @@ -140,52 +196,62 @@ void AsyncGRPCServer::WaitClientGet(int count) { void AsyncGRPCServer::RunSyncUpdate() { ::grpc::ServerBuilder builder; - builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials()); + builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), + &selected_port_); builder.SetMaxSendMessageSize(std::numeric_limits::max()); builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); builder.RegisterService(&service_); cq_send_ = builder.AddCompletionQueue(); cq_get_ = builder.AddCompletionQueue(); + cq_prefetch_ = builder.AddCompletionQueue(); server_ = builder.BuildAndStart(); - LOG(INFO) << "Server listening on " << address_ << std::endl; + LOG(INFO) << "Server listening on " << address_ + << " selected port: " << selected_port_; std::function send_register = std::bind(&AsyncGRPCServer::TryToRegisterNewSendOne, this); std::function get_register = std::bind(&AsyncGRPCServer::TryToRegisterNewGetOne, this); + std::function prefetch_register = + std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this); + // TODO(wuyi): Run these "HandleRequest" in thread pool t_send_.reset( new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_send_.get(), "cq_send", send_register))); - t_get_.reset( new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_get_.get(), "cq_get", get_register))); - + t_prefetch_.reset(new std::thread( + std::bind(&AsyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), + "cq_prefetch", prefetch_register))); // wait server server_->Wait(); t_send_->join(); t_get_->join(); + t_prefetch_->join(); } void AsyncGRPCServer::ShutdownQueue() { std::unique_lock lock(cq_mutex_); cq_send_->Shutdown(); cq_get_->Shutdown(); - is_shut_down_ = true; + cq_prefetch_->Shutdown(); } // This URL explains why shutdown is complicate: void AsyncGRPCServer::ShutDown() { - server_->Shutdown(); + is_shut_down_ = true; ShutdownQueue(); + server_->Shutdown(); } void AsyncGRPCServer::TryToRegisterNewSendOne() { std::unique_lock lock(cq_mutex_); if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; return; } RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, @@ -196,6 +262,7 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { void AsyncGRPCServer::TryToRegisterNewGetOne() { std::unique_lock lock(cq_mutex_); if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; return; } RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, @@ -203,33 +270,49 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { VLOG(4) << "Create RequestGet status:" << 
get->Status(); } +void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { + std::unique_lock lock(cq_mutex_); + if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewPrefetchOne"; + return; + } + RequestPrefetch* prefetch = + new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, + executor_, program_, prefetch_ctx_); + + VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); +} + // FIXME(typhoonzero): change cq_name to enum. void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, - std::string cq_name, + const std::string& cq_name, std::function TryToRegisterNewOne) { TryToRegisterNewOne(); void* tag = NULL; bool ok = false; + while (true) { + VLOG(3) << "HandleRequest for " << cq_name << " while in"; if (!cq->Next(&tag, &ok)) { - LOG(INFO) << cq_name << " get CompletionQueue shutdown!"; + LOG(INFO) << cq_name << " CompletionQueue shutdown!"; break; } + VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; PADDLE_ENFORCE(tag); // FIXME(typhoonzero): de-couple the barriers with recv_op - if (cq_name == "cq_get") WaitCond(1); - if (cq_name == "cq_send") WaitCond(0); + if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); + if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); - RequestBase* base = (RequestBase*)tag; + RequestBase* base = reinterpret_cast(tag); // reference: // https://github.com/tensorflow/tensorflow/issues/5596 // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I if (!ok) { - LOG(WARNING) << cq_name << " recv no regular event:argument name" - << base->GetReqName(); + LOG(WARNING) << cq_name << " recv no regular event:argument name[" + << base->GetReqName() << "]"; TryToRegisterNewOne(); delete base; continue; diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 10e6dd45a901d36de4a6577db4da05551645eb73..b6110f92ed4f38a156e0c99ecfb399f3f47a169e 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -14,10 +14,14 @@ limitations under the License. 
*/ #pragma once -#include -#include +#include +#include // NOLINT +#include +#include "grpc++/grpc++.h" +#include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/var_type.h" @@ -53,6 +57,18 @@ class AsyncGRPCServer final { void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } + void SetProgram(framework::ProgramDesc *program) { program_ = program; } + + void SetPrefetchBlkdId(int blkid) { prefetch_blk_id_ = blkid; } + + void SetExecutor(framework::Executor *executor) { executor_ = executor; } + + void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { + prefetch_ctx_ = prepared; + } + + int GetSelectedPort() { return selected_port_; } + const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } void Push(const std::string &msg_name) { @@ -62,10 +78,12 @@ class AsyncGRPCServer final { void ShutDown(); protected: - void HandleRequest(::grpc::ServerCompletionQueue *cq, std::string cq_name, + void HandleRequest(::grpc::ServerCompletionQueue *cq, + const std::string &cq_name, std::function TryToRegisterNewOne); void TryToRegisterNewSendOne(); void TryToRegisterNewGetOne(); + void TryToRegisterNewPrefetchOne(); void ShutdownQueue(); private: @@ -73,6 +91,7 @@ class AsyncGRPCServer final { volatile bool is_shut_down_ = false; std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_; GrpcService::AsyncService service_; std::unique_ptr<::grpc::Server> server_; @@ -83,6 +102,7 @@ class AsyncGRPCServer final { // received variable from RPC, operators fetch variable from this queue. SimpleBlockQueue var_get_queue_; + // client send variable to this queue. ReceivedQueue var_recv_queue_; // condition of the sub program @@ -92,6 +112,13 @@ class AsyncGRPCServer final { std::unique_ptr t_send_; std::unique_ptr t_get_; + std::unique_ptr t_prefetch_; + + int prefetch_blk_id_; + framework::ExecutorPrepareContext *prefetch_ctx_; + framework::ProgramDesc *program_; + framework::Executor *executor_; + int selected_port_; }; }; // namespace detail diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..c51933718f4ca78e87c77e007c485642000d247d --- /dev/null +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -0,0 +1,140 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include <unistd.h> +#include <string> +#include <thread> // NOLINT + +#include "gtest/gtest.h" +#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/grpc_server.h" + +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace framework = paddle::framework; +namespace platform = paddle::platform; +namespace detail = paddle::operators::detail; + +USE_OP(lookup_table); + +std::unique_ptr<detail::AsyncGRPCServer> rpc_service_; + +framework::BlockDesc* AppendPrefetchBlock(framework::ProgramDesc* program) { + auto root_block = program->MutableBlock(0); + auto* block = program->AppendBlock(*root_block); + + framework::VariableNameMap input({{"W", {"w"}}, {"Ids", {"ids"}}}); + framework::VariableNameMap output({{"Output", {"out"}}}); + auto op = block->AppendOp(); + op->SetType("lookup_table"); + op->SetInput("W", {"w"}); + op->SetInput("Ids", {"ids"}); + op->SetOutput("Out", {"out"}); + + auto& out = *root_block->Var("out"); + out.SetType(framework::proto::VarType::SELECTED_ROWS); + out.SetShape({10, 10}); + + return block; +} + +void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) { + auto w_var = scope->Var("w"); + w_var->GetMutable<framework::SelectedRows>(); + + auto out_var = scope->Var("out"); + out_var->GetMutable<framework::SelectedRows>(); + + auto ids_var = scope->Var("ids"); + ids_var->GetMutable<framework::SelectedRows>(); +} + +void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto ids_var = scope->Var("ids")->GetMutable<framework::SelectedRows>(); + auto rows = ids_var->mutable_rows(); + for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i * 2); + ids_var->mutable_value()->Resize({rows_numel, 1}); + ids_var->mutable_value()->mutable_data<float>(*place); +} + +void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto w = scope->Var("w")->GetMutable<framework::SelectedRows>(); + auto rows = w->mutable_rows(); + for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i); + auto w_value = w->mutable_value(); + w_value->Resize({rows_numel, 10}); + + auto ptr = w_value->mutable_data<float>(*place); + + for (int64_t i = 0; i < w_value->numel(); ++i) { + ptr[i] = static_cast<float>(i / 10); + } +} + +void StartServer(const std::string& endpoint) { + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + framework::ProgramDesc program; + framework::Scope scope; + platform::CPUPlace place; + framework::Executor exe(place); + platform::CPUDeviceContext ctx(place); + auto* block = AppendPrefetchBlock(&program); + auto prepared = exe.Prepare(program, block->ID()); + InitTensorsOnServer(&scope, &place, 10); + + rpc_service_->SetProgram(&program); + rpc_service_->SetPrefetchPreparedCtx(prepared.get()); + rpc_service_->SetDevCtx(&ctx); + rpc_service_->SetScope(&scope); + rpc_service_->SetExecutor(&exe); + + rpc_service_->RunSyncUpdate(); +} + +TEST(PREFETCH, CPU) { + // start up a server instance in the background + std::thread server_thread(StartServer, "127.0.0.1:8889"); + sleep(2); + framework::Scope scope; + platform::CPUPlace place; + platform::CPUDeviceContext ctx(place); + // create vars on the local scope + int64_t rows_numel = 5; + InitTensorsOnClient(&scope, &place, rows_numel); + std::string in_var_name("ids"); + std::string out_var_name("out"); + + detail::RPCClient client; + client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name, + out_var_name); + client.Wait(); + + auto var = scope.Var(out_var_name); + auto value =
var->GetMutable()->value(); + auto ptr = value.mutable_data(place); + + rpc_service_->ShutDown(); + server_thread.join(); + rpc_service_.reset(nullptr); + + for (int64_t i = 0; i < rows_numel; ++i) { + EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast(i * 2)); + } +} diff --git a/paddle/fluid/operators/detail/grpc_service.h b/paddle/fluid/operators/detail/grpc_service.h index ae6f9db3bd31a4b4839b34e8e53dd87f1ecf4b1d..e6dab2f5a3a4280f3979417c3ca2d884a0b8ff2f 100644 --- a/paddle/fluid/operators/detail/grpc_service.h +++ b/paddle/fluid/operators/detail/grpc_service.h @@ -76,10 +76,11 @@ namespace detail { enum class GrpcMethod { kSendVariable, kGetVariable, + kPrefetchVariable, }; static const int kGrpcNumMethods = - static_cast(GrpcMethod::kGetVariable) + 1; + static_cast(GrpcMethod::kPrefetchVariable) + 1; inline const char* GrpcMethodName(GrpcMethod id) { switch (id) { @@ -87,6 +88,8 @@ inline const char* GrpcMethodName(GrpcMethod id) { return "/sendrecv.SendRecvService/SendVariable"; case GrpcMethod::kGetVariable: return "/sendrecv.SendRecvService/GetVariable"; + case GrpcMethod::kPrefetchVariable: + return "/sendrecv.SendRecvService/PrefetchVariable"; } // Shouldn't be reached. @@ -114,5 +117,5 @@ class GrpcService final { }; } // namespace detail -} // namespace operator +} // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/proto_encoder_helper.h b/paddle/fluid/operators/detail/proto_encoder_helper.h index 4a7bfb8bd586fe84c9243bc64117d146c4386674..d91d054b2507f32d1e948dde33da06a70cabe775 100644 --- a/paddle/fluid/operators/detail/proto_encoder_helper.h +++ b/paddle/fluid/operators/detail/proto_encoder_helper.h @@ -19,7 +19,9 @@ limitations under the License. */ #pragma once -#include +#include + +#include "grpc++/grpc++.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -142,6 +144,6 @@ class ProtoEncodeHelper { char* limit_; // Just for CHECKs }; -} // detail -} // operators -} // paddle +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/send_recv.proto b/paddle/fluid/operators/detail/send_recv.proto index 598aaa4c51a6c5cd32eeffe08bbae849aee1a1df..02bb2b9cebb87b83aa1cbef0c644f969b4d17284 100644 --- a/paddle/fluid/operators/detail/send_recv.proto +++ b/paddle/fluid/operators/detail/send_recv.proto @@ -21,6 +21,8 @@ service SendRecvService { rpc SendVariable(VariableMessage) returns (VoidMessage) {} // Argument VariableMessage for GetVariable should only contain varname. rpc GetVariable(VariableMessage) returns (VariableMessage) {} + // pre-fetch variable by given variable name and Ids + rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {} } // VariableMessage is serialized paddle variable message. @@ -59,12 +61,14 @@ message VariableMessage { // lod details: int64 lod_level = 5; repeated LodData lod = 6; + // selected_rows height, aka. original dim0 + int64 slr_height = 7; // tensor data - bytes serialized = 7; + bytes serialized = 8; // selected_rows data - bytes rows = 8; + bytes rows = 9; + // Look up table block execution output variable name. 
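+ // (Set only for prefetch requests; plain send/get leave it empty.)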
+ string out_varname = 10; } message VoidMessage {} - -message TestMessage { int64 test_1 = 1; } diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index d7bbf79c50651943d91c38bbaab775f5ee8dc395..16c612c45a37dd2ffd17f8d5f5946df30e9b3fe6 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -13,8 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detail/sendrecvop_utils.h" + #include -#include +#include // NOLINT + #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" #include "paddle/fluid/framework/data_type.h" @@ -28,11 +30,9 @@ namespace detail { void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg) { + ::grpc::ByteBuffer* msg, + const std::string& out_name) { using VarMsg = sendrecv::VariableMessage; - sendrecv::VariableMessage request; - std::string header; - request.AppendToString(&header); // When using GPU, need to free the copied CPU buffer // when the ByteBuffer destroies // TODO(typhoonzero): add unref here, if we have dependent @@ -42,7 +42,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void* buf = malloc(1024); void* payload = nullptr; size_t payload_size; - ProtoEncodeHelper e((char*)buf, 1024); + ProtoEncodeHelper e(static_cast(buf), 1024); e.WriteString(VarMsg::kVarnameFieldNumber, name); if (var->IsType()) { e.WriteUint64(VarMsg::kTypeFieldNumber, 0); @@ -50,6 +50,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, e.WriteUint64(VarMsg::kTypeFieldNumber, 1); } + if (!out_name.empty()) { + e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name); + } switch (framework::ToVarType(var->Type())) { case framework::proto::VarType_Type_LOD_TENSOR: { auto tensor = var->Get(); @@ -108,6 +111,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, e.WriteUint64(VarMsg::kDimsFieldNumber, dim); } e.WriteUint64(VarMsg::kLodLevelFieldNumber, 0); + e.WriteUint64(VarMsg::kSlrHeightFieldNumber, slr->height()); auto* tensor = slr->mutable_value(); if (platform::is_gpu_place(ctx.GetPlace())) { #ifdef PADDLE_WITH_CUDA @@ -151,10 +155,10 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, framework::proto::VarType_Type_SELECTED_ROWS) { auto* slr = var->GetMutable(); - ProtoEncodeHelper e2((char*)buf, 128); + ProtoEncodeHelper e2(static_cast(buf), 128); // NOTE: rows is of type int64_t size_t rows_memory_size = - slr->rows().capacity() * framework::SizeOfType(typeid(int64_t)); + slr->rows().size() * framework::SizeOfType(typeid(int64_t)); e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size); slices[2] = ::grpc::Slice(e2.size()); memcpy(const_cast(slices[2].begin()), e2.data(), e2.size()); @@ -180,10 +184,10 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable*& var) { + framework::Variable** var) { operators::detail::VariableResponse resp(scope, &ctx); PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); - var = resp.GetVar(); + *var = resp.GetVar(); } } // namespace detail diff --git 
a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h index 3b875627032a6b08cc70280b3cc825c2a703923f..c72e1bd076f670458f3915072154847db6205092 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.h +++ b/paddle/fluid/operators/detail/sendrecvop_utils.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include #include @@ -35,16 +36,23 @@ namespace detail { #define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV" #define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV" +static int64_t GetTimestamp() { + struct timeval tp; + gettimeofday(&tp, NULL); + return tp.tv_sec * 1000 + tp.tv_usec / 1000; +} + typedef void (*DestroyCallback)(void*); void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg); + ::grpc::ByteBuffer* msg, + const std::string& out_varname = std::string()); void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable*& var); + framework::Variable** var); inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) { switch (type) { diff --git a/paddle/fluid/operators/detail/test_serde.cc b/paddle/fluid/operators/detail/serde_test.cc similarity index 93% rename from paddle/fluid/operators/detail/test_serde.cc rename to paddle/fluid/operators/detail/serde_test.cc index e646c894d18d37f5343a10df2542a0e46ab13372..cb5f89583436b059ac4d6509dac9f2e3868561aa 100644 --- a/paddle/fluid/operators/detail/test_serde.cc +++ b/paddle/fluid/operators/detail/serde_test.cc @@ -14,9 +14,9 @@ limitations under the License. */ #include #include -#include +#include // NOLINT -#include +#include "google/protobuf/text_format.h" #include "gtest/gtest.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" @@ -40,14 +40,14 @@ void RunSerdeTestSelectedRows(platform::Place place) { // serialize var to ByteBuffer framework::Variable var; auto* slr = var.GetMutable(); + slr->set_height(1000); auto* tensor = slr->mutable_value(); auto* rows = slr->mutable_rows(); - tensor->Resize(framework::make_ddim({2, 10})); + tensor->Resize(framework::make_ddim({564, 128})); tensor->mutable_data(place); - int tensor_numel = 2 * 10; + int tensor_numel = 564 * 128; math::set_constant(ctx, tensor, 32.7); - rows->push_back(3); - rows->push_back(10); + for (int i = 0; i < 564; ++i) rows->push_back(i); ::grpc::ByteBuffer msg; operators::detail::SerializeToByteBuffer("myvar", &var, ctx, &msg); @@ -64,6 +64,7 @@ void RunSerdeTestSelectedRows(platform::Place place) { sendrecv::VariableMessage varmsg; EXPECT_TRUE(varmsg.ParseFromString(tmp)); + // deserialize bytebuffer EXPECT_EQ(varmsg.varname(), "myvar"); EXPECT_EQ(varmsg.type(), 1); @@ -74,8 +75,10 @@ void RunSerdeTestSelectedRows(platform::Place place) { for (int i = 0; i < tensor_numel; ++i) { EXPECT_FLOAT_EQ(tensor_data[i], 32.7); } - EXPECT_EQ(rows_data[0], 3); - EXPECT_EQ(rows_data[1], 10); + for (int i = 0; i < 564; ++i) { + EXPECT_EQ(rows_data[i], i); + } + // deserialize zero-copy // framework::Variable var2; // operators::detail::DeserializeFromByteBuffer(msg, ctx, &var2); @@ -104,8 +107,10 @@ void RunSerdeTestSelectedRows(platform::Place place) { for (int i = 0; i < tensor_numel; ++i) { EXPECT_FLOAT_EQ(tensor_data2[i], 32.7); } - EXPECT_EQ(rows_data2[0], 3); - EXPECT_EQ(rows_data2[1], 10); + for 
(size_t i = 0; i < rows2->size(); ++i) { + EXPECT_EQ(rows_data2[i], i); + } + EXPECT_EQ(slr2->height(), 1000); } void RunTestLodTensor(platform::Place place, int from_type = 0) { diff --git a/paddle/fluid/operators/detail/simple_block_queue.h b/paddle/fluid/operators/detail/simple_block_queue.h index 36b58b0c6700b5af7eaea92d2b0c32adaba35bb8..69773e05df7ed76f31c26f4304693fec2e9aac9c 100644 --- a/paddle/fluid/operators/detail/simple_block_queue.h +++ b/paddle/fluid/operators/detail/simple_block_queue.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include <condition_variable> +#include <condition_variable> // NOLINT #include <deque> -#include <mutex> +#include <mutex> // NOLINT namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc index 12e8eb0b4da2252b104415aef4156bf100c3e565..c9d7fd6d1581f6f4182e9e3e0d633c13a3c336a5 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -13,7 +13,11 @@ // limitations under the License. #include "paddle/fluid/operators/detail/variable_response.h" -#include <string.h> + +#include <string> +#include <utility> +#include <vector> + #include "paddle/fluid/operators/detail/send_recv.pb.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" @@ -48,6 +52,8 @@ bool ReadRaw(::google::protobuf::io::CodedInputStream* input, void* dest, int size) { const void* data = NULL; int size_to_write = 0; + int length = size; + int total_written = 0; if (platform::is_gpu_place(place)) { #ifdef PADDLE_WITH_CUDA @@ -56,16 +62,21 @@ bool ReadRaw(::google::protobuf::io::CodedInputStream* input, platform::CPUPlace cpu; char* p = reinterpret_cast<char*>(dest); - while (size > 0) { + while (total_written < length) { if (!input->GetDirectBufferPointer(&data, &size_to_write)) { return false; } - + // NOTE: if the raw buffer is large and has two neighboring raw-buffer + // fields, GetDirectBufferPointer can return both at once; use length + // to truncate it. + if (total_written + size_to_write > length) { + size_to_write = length - total_written; + } memory::Copy(boost::get<platform::CUDAPlace>(place), reinterpret_cast<void*>(p), cpu, data, size_to_write, gpu_dev_ctx.stream()); p += size_to_write; - size -= size_to_write; + total_written += size_to_write; input->Skip(size_to_write); } @@ -77,16 +88,21 @@ bool ReadRaw(::google::protobuf::io::CodedInputStream* input, } char* p = reinterpret_cast<char*>(dest); - while (size > 0) { + while (total_written < length) { if (!input->GetDirectBufferPointer(&data, &size_to_write)) { return false; } + // NOTE: if the raw buffer is large and has two neighboring raw-buffer + // fields, GetDirectBufferPointer can return both at once; use length to + // truncate it. + if (total_written + size_to_write > length) { + size_to_write = length - total_written; + } // TODO(gongwb): can we avoid copy?
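+ // (A zero-copy path would require the protobuf-owned buffer to outlive
+ // the tensor, so for now each chunk is copied out of the coded stream.)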
platform::CPUPlace cpu; memory::Copy(cpu, reinterpret_cast(p), cpu, data, size_to_write); p += size_to_write; - size -= size_to_write; + total_written += size_to_write; input->Skip(size_to_write); } @@ -96,7 +112,8 @@ bool ReadRaw(::google::protobuf::io::CodedInputStream* input, bool VariableResponse::CopyLodTensorData( ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + const platform::DeviceContext& ctx, const framework::DDim& dims, + int length) { auto var = scope_->FindVar(meta_.varname()); auto* tensor = var->GetMutable(); tensor->Resize(dims); @@ -132,11 +149,17 @@ inline framework::DDim GetDims( bool VariableResponse::CopySelectRowsTensorData( ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + const platform::DeviceContext& ctx, const framework::DDim& dims, + int length) { auto var = scope_->FindVar(meta_.varname()); auto* slr = var->GetMutable(); + slr->set_height(meta_.slr_height()); auto* tensor = slr->mutable_value(); tensor->Resize(dims); + PADDLE_ENFORCE_EQ( + static_cast(tensor->numel()), + length / framework::SizeOfType( + paddle::operators::detail::ToTypeIndex(meta_.data_type()))); void* tensor_data = tensor->mutable_data( ctx.GetPlace(), paddle::operators::detail::ToTypeIndex(meta_.data_type())); @@ -153,6 +176,8 @@ bool VariableResponse::CopySelectRowsData( const platform::DeviceContext& ctx, int length) { auto var = scope_->FindVar(meta_.varname()); auto* slr = var->GetMutable(); + slr->mutable_rows()->resize(length / + framework::SizeOfType(typeid(int64_t))); // int64 int64_t* rows_data = slr->mutable_rows()->data(); // copy rows CPU data, GPU data will be copied lazily. @@ -233,7 +258,6 @@ int VariableResponse::Parse(Source* source) { if (tag != 0) { return -1; } - return 0; } @@ -336,6 +360,14 @@ int VariableResponse::Parse(Source* source) { } break; } + case sendrecv::VariableMessage::kSlrHeightFieldNumber: { + uint64_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + return tag; + } + meta_.set_slr_height(static_cast(v)); + break; + } case sendrecv::VariableMessage::kSerializedFieldNumber: { PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS || meta_.type() == sendrecv::LOD_TENSOR) && @@ -384,6 +416,20 @@ int VariableResponse::Parse(Source* source) { } break; } + case sendrecv::VariableMessage::kOutVarnameFieldNumber: { + uint32_t length; + if ((wt != WIRETYPE_LENGTH_DELIMITED) || !input.ReadVarint32(&length)) { + return tag; + } + + std::string temp; + if (!input.ReadString(&temp, length)) { + return tag; + } + + meta_.set_out_varname(temp); + break; + } default: { // Unknown tag, return unknown error. diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index e121ed7bce966d7dea94f71087f2187dcaa17cec..93b0d3cfb4f7d7f336414361773f872d7b259482 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -14,6 +14,8 @@ #pragma once +#include + #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" @@ -53,6 +55,7 @@ class VariableResponse { int Parse(const ::grpc::ByteBuffer& byte_buffer); inline std::string Varname() { return meta_.varname(); } + inline std::string OutVarname() { return meta_.out_varname(); } // should call parse first. 
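The new kSlrHeightFieldNumber and kOutVarnameFieldNumber cases above follow the same hand-rolled protobuf decoding pattern as the rest of Parse(): read a tag, check the wire type, then decode either a varint or a length-delimited payload. A condensed sketch of that pattern in isolation (the field numbers and the ParseHeightAndName name are hypothetical, not Paddle or send_recv.proto APIs):

#include <cstdint>
#include <string>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite.h>

using google::protobuf::io::CodedInputStream;
using google::protobuf::internal::WireFormatLite;

// Decode field 1 as an int64 height and field 2 as a string name;
// unknown fields are skipped.
bool ParseHeightAndName(CodedInputStream* input, int64_t* height,
                        std::string* name) {
  while (uint32_t tag = input->ReadTag()) {
    const int field = WireFormatLite::GetTagFieldNumber(tag);
    const auto wt = WireFormatLite::GetTagWireType(tag);
    if (field == 1 && wt == WireFormatLite::WIRETYPE_VARINT) {
      uint64_t v = 0;
      if (!input->ReadVarint64(&v)) return false;
      *height = static_cast<int64_t>(v);
    } else if (field == 2 && wt == WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
      uint32_t len = 0;
      if (!input->ReadVarint32(&len) || !input->ReadString(name, len)) return false;
    } else if (!WireFormatLite::SkipField(input, tag)) {
      return false;
    }
  }
  return true;
}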
framework::Variable* GetVar() { return scope_->FindVar(meta_.varname()); } @@ -60,14 +63,14 @@ class VariableResponse { private: bool CopySelectRowsTensorData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, - framework::DDim& dims, int length); + const framework::DDim& dims, int length); bool CopySelectRowsData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, int length); bool CopyLodTensorData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, - framework::DDim& dims, int length); + const framework::DDim& dims, int length); private: const framework::Scope* scope_; diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index b5ee86ae2d11dfc835e1a3a6826ce016baf38a29..0628b4b826d2730a8e3fb4842e4ae550b8c00569 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -11,9 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + #include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/dropout_op_test.cc b/paddle/fluid/operators/dropout_op_test.cc index db97ba4f64105c37c49cafbc3fbc4829c5077467..424d273c34b7e8d70c88b591c4fe45db61465f38 100644 --- a/paddle/fluid/operators/dropout_op_test.cc +++ b/paddle/fluid/operators/dropout_op_test.cc @@ -13,8 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include + #include -#include +#include // NOLINT +#include #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" @@ -30,9 +32,9 @@ namespace m = paddle::operators::math; USE_OP(dropout); -void Compare(f::Scope& scope, p::DeviceContext& ctx) { +void Compare(f::Scope* scope, const p::DeviceContext& ctx) { // init - auto var = scope.Var("X"); + auto var = scope->Var("X"); auto tensor = var->GetMutable(); tensor->Resize({10, 10}); @@ -44,12 +46,12 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) { TensorFromVector(init, ctx, tensor); auto place = ctx.GetPlace(); - auto out_var = scope.Var("Out"); + auto out_var = scope->Var("Out"); auto out_tensor = out_var->GetMutable(); out_tensor->Resize({10, 10}); out_tensor->mutable_data(place); // allocate - auto mask_var = scope.Var("Mask"); + auto mask_var = scope->Var("Mask"); auto mask_tensor = mask_var->GetMutable(); mask_tensor->Resize({10, 10}); mask_tensor->mutable_data(place); // allocate @@ -63,7 +65,7 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) { auto dropout_op = f::OpRegistry::CreateOp( "dropout", {{"X", {"X"}}}, {{"Out", {"Out"}}, {"Mask", {"Mask"}}}, attrs); - dropout_op->Run(scope, place); + dropout_op->Run(*scope, place); std::vector out_vec; TensorToVector(*out_tensor, ctx, &out_vec); @@ -81,6 +83,11 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) { } } +// TODO(wyi): Due to +// https://github.com/PaddlePaddle/Paddle/issues/9507, I temporarily +// disable this test to remove the prevention of the merge of +// unrelated PRs. 
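Commenting the two tests out with /* */ works, but if only a temporary skip is intended, gtest's DISABLED_ name prefix is an alternative that keeps the code compiling and reports the test as disabled in the run summary. A sketch of that alternative, reusing this file's fixtures and the updated Compare(Scope*, const DeviceContext&) signature:

// Skipped by default; can still be run with --gtest_also_run_disabled_tests.
TEST(Dropout, DISABLED_CPUDense) {
  f::Scope scope;
  p::CPUPlace place;
  p::CPUDeviceContext ctx(place);
  Compare(&scope, ctx);
}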
+/* TEST(Dropout, CPUDense) { f::Scope scope; p::CPUPlace place; @@ -94,3 +101,4 @@ TEST(Dropout, GPUDense) { p::CUDADeviceContext ctx(place); Compare(scope, ctx); } +*/ diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h index f04d8d8fd82ed2336dff9c5b88808dc32de6630a..a33634ab2503f988a8a692682ddb238d4794a3c0 100644 --- a/paddle/fluid/operators/elementwise_op.h +++ b/paddle/fluid/operators/elementwise_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" @@ -106,18 +107,18 @@ information. However, the output only shares the LoD information with input $X$. protected: std::string comment_; - void Replace(std::string& src, std::string from, std::string to) { + void Replace(std::string* src, std::string from, std::string to) { std::size_t len_from = std::strlen(from.c_str()); std::size_t len_to = std::strlen(to.c_str()); - for (std::size_t pos = src.find(from); pos != std::string::npos; - pos = src.find(from, pos + len_to)) { - src.replace(pos, len_from, to); + for (std::size_t pos = src->find(from); pos != std::string::npos; + pos = src->find(from, pos + len_to)) { + src->replace(pos, len_from, to); } } void SetComment(std::string name, std::string equation) { - Replace(comment_, "{name}", name); - Replace(comment_, "{equation}", equation); + Replace(&comment_, "{name}", name); + Replace(&comment_, "{equation}", equation); } }; diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 0b4238436ffcc586fe8bc7abbe4cfbc1654dcb88..415182201a7a9e11d8ea8c62b92849b5ea3bac3e 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -13,14 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/transform.h" #ifdef __NVCC__ +#include #include -#include "paddle/fluid/platform/cuda_helper.h" constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024; #endif @@ -43,35 +44,35 @@ namespace operators { */ inline void get_mid_dims(const framework::DDim& x_dims, const framework::DDim& y_dims, const int axis, - int& pre, int& n, int& post) { - pre = 1; - n = 1; - post = 1; + int* pre, int* n, int* post) { + *pre = 1; + *n = 1; + *post = 1; for (int i = 0; i < axis; ++i) { - pre *= x_dims[i]; + (*pre) *= x_dims[i]; } for (int i = 0; i < y_dims.size(); ++i) { PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i], "Broadcast dimension mismatch."); - n *= y_dims[i]; + (*n) *= y_dims[i]; } for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) { - post *= x_dims[i]; + (*post) *= x_dims[i]; } } -inline void trim_trailing_singular_dims(framework::DDim& dims) { +inline void trim_trailing_singular_dims(framework::DDim* dims) { // Remove trailing dimensions of size 1 for y - auto actual_dims_size = dims.size(); + auto actual_dims_size = dims->size(); for (; actual_dims_size != 0; --actual_dims_size) { - if (dims[actual_dims_size - 1] != 1) break; + if ((*dims)[actual_dims_size - 1] != 1) break; } - if (actual_dims_size != dims.size()) { - auto actual_dims = framework::vectorize(dims); + if (actual_dims_size != dims->size()) { + auto actual_dims = framework::vectorize(*dims); actual_dims.resize(actual_dims_size); - dims = framework::make_ddim(actual_dims); + *dims = framework::make_ddim(actual_dims); } } @@ -159,7 +160,7 @@ class RowwiseTransformIterator RowwiseTransformIterator, const T*> super_t; HOSTDEVICE RowwiseTransformIterator(const T* x, int n) - : super_t(x), begin_(x), n_(n){}; + : super_t(x), begin_(x), n_(n) {} friend class thrust::iterator_core_access; private: @@ -179,7 +180,7 @@ class MidWiseTransformIterator MidWiseTransformIterator, const T*> super_t; HOSTDEVICE MidWiseTransformIterator(const T* x, int n, int post) - : super_t(x), begin_(x), n_(n), post_(post){}; + : super_t(x), begin_(x), n_(n), post_(post) {} friend class thrust::iterator_core_access; private: @@ -333,6 +334,55 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out, } } #ifdef __NVCC__ + +// __shfl_down has been deprecated as of CUDA 9.0. +#if CUDA_VERSION < 9000 +template +__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { + return __shfl_down(val, delta); +} +#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; +#else +#define FULL_WARP_MASK 0xFFFFFFFF +#define CREATE_SHFL_MASK(mask, predicate) \ + mask = __ballot_sync(FULL_WARP_MASK, (predicate)) +#endif + +template +__device__ T reduceSum(T val, int tid, int len) { + // TODO(zcd): The warp size should be taken from the + // parameters of the GPU but not specified as 32 simply. + // To make the reduceSum more efficiently, + // I use Warp-Level Parallelism and assume the Warp size + // is 32 which may be different for different GPU, + // but most card's warp size is 32. 
+ __shared__ T shm[32]; + const int warpSize = 32; + unsigned mask = 0u; + CREATE_SHFL_MASK(mask, tid < len); + + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += __shfl_down_sync(mask, val, offset); + + if (tid < warpSize) shm[tid] = 0; + + __syncthreads(); + + if (tid % warpSize == 0) { + shm[tid / warpSize] = val; + } + + CREATE_SHFL_MASK(mask, tid < warpSize); + + if (tid < warpSize) { + val = shm[tid]; + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += __shfl_down_sync(mask, val, offset); + } + + return val; +} + template static __global__ void ElemwiseGradBroadcast1CUDAKernel( const T* x, const T* y, const T* out, const T* dout, int h, int w, @@ -355,7 +405,7 @@ static __global__ void ElemwiseGradBroadcast1CUDAKernel( if (dy) { h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h; - val = platform::reduceSum(val, tid, h); + val = reduceSum(val, tid, h); if (threadIdx.x == 0) { dy[j] = val; } @@ -432,7 +482,7 @@ static __global__ void ElemwiseGradBroadcast2CUDAKernel( if (dy) { int h = pre * post; h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h; - val = platform::reduceSum(val, tid, h); + val = reduceSum(val, tid, h); if (threadIdx.x == 0) { dy[j] = val; } @@ -472,11 +522,11 @@ void ElemwiseGradCompute(const framework::ExecutionContext& ctx, auto y_dim = y.dims(); axis = (axis == -1 ? x_dim.size() - y_dim.size() : axis); - trim_trailing_singular_dims(y_dim); + trim_trailing_singular_dims(&y_dim); axis = (y_dim.size() == 0) ? x_dim.size() : axis; int pre, n, post; - get_mid_dims(x_dim, y_dim, axis, pre, n, post); + get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post); if (post == 1) { int h = pre; int w = n; @@ -514,7 +564,7 @@ void ElemwiseGradCompute(const framework::ExecutionContext& ctx, } } } -}; +} template @@ -543,11 +593,11 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx, } axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); - trim_trailing_singular_dims(y_dims); + trim_trailing_singular_dims(&y_dims); axis = (y_dims.size() == 0) ? x_dims.size() : axis; int pre, n, post; - get_mid_dims(x_dims, y_dims, axis, pre, n, post); + get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post); if (post == 1) { broadcastfunctor f; @@ -582,11 +632,11 @@ void ElementwiseComputeEx(const framework::ExecutionContext& ctx, axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), "Axis should be in range [0, x_dims)"); - trim_trailing_singular_dims(y_dims); + trim_trailing_singular_dims(&y_dims); axis = (y_dims.size() == 0) ? x_dims.size() : axis; int pre, n, post; - get_mid_dims(x_dims, y_dims, axis, pre, n, post); + get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post); if (post == 1) { functor.RunRowWise(n, pre); return; diff --git a/paddle/fluid/operators/fc_mkldnn_op.cc b/paddle/fluid/operators/fc_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..847b7b0c12e1679501dbe83d578b23ca2aef3e9e --- /dev/null +++ b/paddle/fluid/operators/fc_mkldnn_op.cc @@ -0,0 +1,303 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/fc_op.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using paddle::framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; + +template +class MKLDNNMD { + public: + explicit MKLDNNMD(const T* in, const T* w, bool bias) + : in(paddle::framework::vectorize2int(in->dims())), + w(paddle::framework::vectorize2int(w->dims())) { + with_bias_ = bias; + } + + mkldnn::memory::desc dst() const { + return platform::MKLDNNMemDesc({in[0], w[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nc); + } + + mkldnn::memory::desc src() const { + return is_spatial() + ? platform::MKLDNNMemDesc({in[0], in[1], in[2], in[3]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nchw) + : platform::MKLDNNMemDesc({in[0], in[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nc); + } + + mkldnn::memory::desc weights() const { + return is_spatial() + ? platform::MKLDNNMemDesc({w[1], in[1], in[2], in[3]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::oihw) + : platform::MKLDNNMemDesc({w[1], in[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::oi); + } + + mkldnn::memory::desc bias() const { + return with_bias_ + ? platform::MKLDNNMemDesc({w[1]}, mkldnn::memory::data_type::f32, + mkldnn::memory::format::format_undef) + : platform::MKLDNNMemDesc({}, mkldnn::memory::data_type::f32, + mkldnn::memory::format::format_undef); + } + + private: + bool is_spatial() const { return in.size() > 1 && w.size() > 1; } + + std::vector in; + std::vector w; + bool with_bias_; + bool is_spatial_; +}; + +class MKLDNNMemory { + public: + MKLDNNMemory(MKLDNNMD* t, const mkldnn::engine& e) + : md_(t), engine_(e) {} + virtual ~MKLDNNMemory() = default; + + template + mkldnn::memory dst(const Output* out) { + return mkldnn::memory({md_->dst(), engine_}, + static_cast(const_cast(out))); + } + + template + mkldnn::memory dst(Output* out) { + return mkldnn::memory({md_->dst(), engine_}, out); + } + + template + mkldnn::memory src(const Input* in) { + return mkldnn::memory({md_->src(), engine_}, + static_cast(const_cast(in))); + } + + template + mkldnn::memory weights(const Weight* w) { + return mkldnn::memory({md_->weights(), engine_}, + static_cast(const_cast(w))); + } + + mkldnn::memory bias() { + return mkldnn::memory(mkldnn::memory::primitive_desc(md_->bias(), engine_)); + } + + private: + MKLDNNMD* md_; + const mkldnn::engine& engine_; +}; + +template +class FCMKLDNNOpKernel : public paddle::framework::OpKernel { + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + auto input = ctx.Input("Input"); + auto w = ctx.Input("W"); + + PADDLE_ENFORCE(input->dims().size() == 2 || input->dims().size() == 4, + "Input must be with 2 or 4 dimensions, i.e. 
NCHW"); + PADDLE_ENFORCE(w->dims().size() == 2 || w->dims().size() == 4, + "Weights must be with 2 or 4 dimensions, i.e. OI or OIHW"); + + bool with_bias = ctx.Attr("bias_attr"); + MKLDNNMD md(input, w, with_bias); + + std::shared_ptr pd = + FcFwdPrimitiveDesc(md.src(), md.weights(), md.dst(), md.bias(), + with_bias, mkldnn_engine); + + const std::string key = ctx.op().Output("Out"); + const std::string key_fc_pd = key + "@fc_pd"; + + dev_ctx.SetBlob(key_fc_pd, pd); + + MKLDNNMemory mem(&md, mkldnn_engine); + + const T* input_data = input->data(); + const T* w_data = w->data(); + + auto output = ctx.Output("Out"); + T* output_data = output->mutable_data(ctx.GetPlace()); + + auto dst_memory = mem.dst(output_data); + auto src_memory = mem.src(input_data); + auto weights_memory = mem.weights(w_data); + auto bias_memory = mem.bias(); + + auto forward = with_bias ? mkldnn::inner_product_forward( + *pd, src_memory, weights_memory, bias_memory, + dst_memory) + : mkldnn::inner_product_forward( + *pd, src_memory, weights_memory, dst_memory); + + std::vector pipeline = {forward}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + + private: + std::unique_ptr + FcFwdPrimitiveDesc(const mkldnn::memory::desc& src, + const mkldnn::memory::desc& weights, + const mkldnn::memory::desc& dst, + const mkldnn::memory::desc& bias, const bool with_bias, + const mkldnn::engine& engine) const { + auto desc = with_bias + ? mkldnn::inner_product_forward::desc( + mkldnn::prop_kind::forward, src, weights, bias, dst) + : mkldnn::inner_product_forward::desc( + mkldnn::prop_kind::forward, src, weights, dst); + + auto pd = new mkldnn::inner_product_forward::primitive_desc(desc, engine); + return std::unique_ptr(pd); + } +}; + +template +class FCMKLDNNGradOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + T* input_grad_data = nullptr; + T* w_grad_data = nullptr; + + Tensor* input_grad = ctx.Output(framework::GradVarName("Input")); + Tensor* w_grad = ctx.Output(framework::GradVarName("W")); + + if (input_grad) { + input_grad_data = input_grad->mutable_data(ctx.GetPlace()); + } + if (w_grad) { + w_grad_data = w_grad->mutable_data(ctx.GetPlace()); + } + + const Tensor* input = ctx.Input("Input"); + const T* input_data = input->data(); + + const Tensor* w = ctx.Input("W"); + const T* w_data = w->data(); + + const Tensor* out_grad = ctx.Input(framework::GradVarName("Out")); + const T* out_grad_data = out_grad->data(); + + bool with_bias = ctx.Attr("bias_attr"); + + MKLDNNMD md(input, w, with_bias); + MKLDNNMemory mem(&md, mkldnn_engine); + + auto dst_memory = mem.dst(out_grad_data); + auto src_memory = mem.src(input_data); + auto weights_memory = mem.weights(w_data); + auto bias_memory = mem.bias(); + + const std::string key = ctx.op().Input("Out"); + const std::string key_fc_pd = key + "@fc_pd"; + + auto pd = + std::static_pointer_cast( + dev_ctx.GetBlob(key_fc_pd)); + + PADDLE_ENFORCE(pd != nullptr, "Fail to find key_fc_pd in device context"); + + if (w_grad) { + auto weights_grad_memory = mem.weights(w_grad_data); + + mkldnn::inner_product_backward_weights::primitive_desc bwd_weight_pd = + FcBwdWeightsPrimitiveDesc(md.src(), md.weights(), md.dst(), md.bias(), + with_bias, *pd, mkldnn_engine); + + auto bwd_weights_prim 
= mkldnn::inner_product_backward_weights( + bwd_weight_pd, src_memory, dst_memory, weights_grad_memory, + bias_memory); + + std::vector pipeline{bwd_weights_prim}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + + if (input_grad) { + auto src_grad_memory = mem.src(input_grad_data); + + mkldnn::inner_product_backward_data::primitive_desc bwd_data_pd = + FcBwdDataPrimitiveDesc(md.src(), md.weights(), md.dst(), *pd, + mkldnn_engine); + + auto bwd_data_prim = mkldnn::inner_product_backward_data( + bwd_data_pd, dst_memory, weights_memory, src_grad_memory); + + std::vector pipeline{bwd_data_prim}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + } + + private: + mkldnn::inner_product_backward_weights::primitive_desc + FcBwdWeightsPrimitiveDesc( + const mkldnn::memory::desc& src, const mkldnn::memory::desc& diff_weights, + const mkldnn::memory::desc& diff_dst, const mkldnn::memory::desc& bias, + const bool with_bias, + const mkldnn::inner_product_forward::primitive_desc& pd, + const mkldnn::engine& engine) const { + auto bwd_weight_desc = with_bias + ? mkldnn::inner_product_backward_weights::desc( + src, diff_weights, bias, diff_dst) + : mkldnn::inner_product_backward_weights::desc( + src, diff_weights, bias, diff_dst); + + return mkldnn::inner_product_backward_weights::primitive_desc( + bwd_weight_desc, engine, pd); + } + + mkldnn::inner_product_backward_data::primitive_desc FcBwdDataPrimitiveDesc( + const mkldnn::memory::desc& diff_src, const mkldnn::memory::desc& weights, + const mkldnn::memory::desc& diff_dst, + const mkldnn::inner_product_forward::primitive_desc& pd, + const mkldnn::engine& engine) const { + auto bwd_data_desc = + mkldnn::inner_product_backward_data::desc(diff_src, weights, diff_dst); + return mkldnn::inner_product_backward_data::primitive_desc(bwd_data_desc, + engine, pd); + } +}; +} // namespace operators +} // namespace paddle + +REGISTER_OP_KERNEL(fc, MKLDNN, ::paddle::platform::CPUPlace, + paddle::operators::FCMKLDNNOpKernel); + +REGISTER_OP_KERNEL(fc_grad, MKLDNN, ::paddle::platform::CPUPlace, + paddle::operators::FCMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..381771f157d78fb04e54f0a07c40e4df2c91441a --- /dev/null +++ b/paddle/fluid/operators/fc_op.cc @@ -0,0 +1,102 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/fc_op.h" +#include + +namespace paddle { +namespace operators { + +void FCOp::InferShape(framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "X(Input) of Fully Connected should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Out(Output) of Fully Connected should not be null."); + PADDLE_ENFORCE(ctx->HasInput("W"), + "W(Input) of Fully Connected should not be null."); + + auto in_dims = ctx->GetInputDim("Input"); + auto w_dims = ctx->GetInputDim("W"); + std::vector output_shape({in_dims[0], w_dims[1]}); + + PADDLE_ENFORCE(in_dims.size() == 2 || in_dims.size() == 4, + "Fully Connected input should be 2-D or 4-D tensor."); + + PADDLE_ENFORCE(w_dims.size() == 2 || w_dims.size() == 4, + "Fully Connected input should be 2-D or 4-D tensor."); + + ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); + ctx->ShareLoD("Input", "Out"); +} + +framework::OpKernelType FCOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library{framework::LibraryType::kMKLDNN}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout, library); +} + +void FCOpGrad::InferShape(framework::InferShapeContext* ctx) const { + auto in_dims = ctx->GetInputDim("Input"); + auto w_dims = ctx->GetInputDim("W"); + + if (ctx->HasOutput(framework::GradVarName("Input"))) { + ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); + } + if (ctx->HasOutput(framework::GradVarName("W"))) { + ctx->SetOutputDim(framework::GradVarName("W"), w_dims); + } +} + +framework::OpKernelType FCOpGrad::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library{framework::LibraryType::kMKLDNN}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout, library); +} + +FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", "(Tensor) The input tensor of fully connected operator. "); + AddInput("W", "(Tensor), The second input tensor of fc op."); + AddOutput("Out", "(Tensor) The output tensor of fully connected operator. "); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddAttr("bias_attr", "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddComment(R"DOC( + Fully Connected Operator. + + The fully connected operation calculates the output based on the input, weights and bias attribute. + The size of each dimension of the parameters checked in the infer-shape. + The matrix of bias is generated by the mkldnn framework, when the bias_attr is True. + Additional parametrs are use_mkldnn and bias_attr. + The input(X) size and output(Out) size may be diffrent. 
+ + The fully connected layer only supports MKLDNN version +)DOC"); +} + +} // namespace operators +} // namespace paddle + +REGISTER_OP(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker, fc_grad, + paddle::operators::FCOpGrad); diff --git a/paddle/fluid/operators/fc_op.h b/paddle/fluid/operators/fc_op.h new file mode 100644 index 0000000000000000000000000000000000000000..70fa96440d344397a7427c1338afee85bde923d4 --- /dev/null +++ b/paddle/fluid/operators/fc_op.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class FCOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FCOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FCOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FCOpMaker(OpProto* proto, OpAttrChecker* op_checker); +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/go_op.cc b/paddle/fluid/operators/go_op.cc index cfa797717d78aa72e1b931b6db6e153270b3424e..58fe32446217e07235b40b9b78190094e57e4951 100644 --- a/paddle/fluid/operators/go_op.cc +++ b/paddle/fluid/operators/go_op.cc @@ -56,11 +56,11 @@ class GoOp : public framework::OperatorBase { // TODO(varunarora): Consider moving this root scope lookup to scope.h. const framework::Scope *root_scope = &scope; - const framework::Scope *parent_scope = &(root_scope->parent()); + const framework::Scope *parent_scope = root_scope->parent(); while (parent_scope != nullptr) { root_scope = parent_scope; - parent_scope = &(parent_scope->parent()); + parent_scope = parent_scope->parent(); } framework::BlockDesc *block = Attr(kBlock); diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index 2a91dcbcd418fcd61445b7d744789bdeee11d2f2..2490b83b8c50ce4a68095be10d78a380174c1a3f 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/gru_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index 0886bebc41d8b0f28745e88685f3954f86c823a1..1d5c291495c0f0c0d8da9ff6949888b4cbb6036d 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -13,15 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/gru_compute.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence2batch.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" - namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 048391549dd8df24cc215d04431c306ac4c7e5be..5b387d8d344dfc3475a537827acd9e125fe6693c 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/im2sequence_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index a6a83fefbc6266fa718dcad78b3a018526f124db..d792c68f784d8ffec0eb303a6ab9b59c9f121fa7 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -13,7 +13,7 @@ limitations under the License. */ #pragma once - +#include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index 6b5c3db13c0929ae0dd2fb2c981867df0a36c1ce..ec2e641679fedec776d48716f13445f44375ce3d 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -1,71 +1,46 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/framework/op_registry.h" +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/operators/increment_op.h" namespace paddle { namespace operators { -class IncrementInferShape : public framework::InferShapeBase { +class IncrementOp : public framework::OperatorWithKernel { public: - void operator()(framework::InferShapeContext *ctx) const override { + IncrementOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of IncrementOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of IncrementOp should not be null."); PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X"))); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", "Out"); } -}; - -struct IncrementFunctor { - IncrementFunctor(const framework::LoDTensor &x, framework::LoDTensor *out, - float value) - : x_(x), out_(out), value_(value) {} - - template - void operator()() const { - *out_->data() = *x_.data() + static_cast(value_); - } - - const framework::LoDTensor &x_; - framework::LoDTensor *out_; - float value_; -}; - -class IncrementOp : public framework::OperatorBase { - public: - IncrementOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - private: - void RunImpl(const framework::Scope &scope, - const platform::Place &place) const override { - auto &x = scope.FindVar(Input("X"))->Get(); - auto &out = - *scope.FindVar(Output("Out"))->GetMutable(); - PADDLE_ENFORCE(platform::is_cpu_place(x.place())); - out.Resize(x.dims()); - out.mutable_data(x.place(), x.type()); - float value = Attr("step"); - VLOG(10) << Output("Out") << " increase " << Input("X") << " with " - << value; - framework::VisitDataType(framework::ToDataType(out.type()), - IncrementFunctor(x, &out, value)); + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx); + // IncrementOp kernel's device type is decided by input tensor place + kt.place_ = ctx.Input("X")->place(); + return kt; } }; @@ -108,5 +83,10 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementInferShape, - ops::IncrementOpMaker, ops::IncrementGradOpMaker); +REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker, + ops::IncrementGradOpMaker); +REGISTER_OP_CPU_KERNEL( + increment, ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel) diff --git a/paddle/fluid/operators/increment_op.cu b/paddle/fluid/operators/increment_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..7fb6425fe994751c4d7a025bb62e43a84c8d95c2 --- /dev/null +++ b/paddle/fluid/operators/increment_op.cu @@ -0,0 +1,22 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/increment_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + increment, ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel, + ops::IncrementKernel) diff --git a/paddle/fluid/operators/increment_op.h b/paddle/fluid/operators/increment_op.h new file mode 100644 index 0000000000000000000000000000000000000000..d0e8c66255ef68b975701fb6b3c145be2590e271 --- /dev/null +++ b/paddle/fluid/operators/increment_op.h @@ -0,0 +1,39 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class IncrementKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* x_tensor = context.Input("X"); + auto* out_tensor = context.Output("Out"); + float step = context.Attr("step"); + + out_tensor->mutable_data(context.GetPlace()); + auto& dev = + *context.template device_context().eigen_device(); + framework::EigenScalar::From(*out_tensor).device(dev) = + framework::EigenScalar::From(*x_tensor) + static_cast(step); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index eef25f8a06ddb3311f3cfea21b64d8f7d7e58f24..c2a8c7f867a4483a7fda2f4336a64ab109ce86e8 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/label_smooth_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h index 800a1303e1a427e7bd5e6c04354b8a5fbd816712..d5162bcd742c05980c89394b5d011bd078b61211 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.h +++ b/paddle/fluid/operators/linear_chain_crf_op.h @@ -100,7 +100,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { auto x_row_max = EigenMatrix::From(emission_row_max); x_row_max.device(place) = x.maximum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(int(batch_size), 1)); + .reshape(Eigen::DSizes(static_cast(batch_size), 1)); auto x_exps = EigenMatrix::From(*emission_exps); x_exps.device(place) = diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 08b83375dd5462e67c3da2c6c7401dd5e54793f0..5d293665f0bcc098126ad3ec6c9bf34ff54c3b6f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -12,29 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include #include -#include +#include // NOLINT +#include -#include - -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/proto_desc.h" -#include "paddle/fluid/framework/threadpool.h" -#include "paddle/fluid/operators/detail/grpc_server.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" -#include "paddle/fluid/operators/detail/simple_block_queue.h" -#include "paddle/fluid/string/printf.h" +#include "paddle/fluid/operators/listen_and_serv_op.h" namespace paddle { namespace operators { -constexpr char kOptimizeBlock[] = "OptimizeBlock"; - void RunServer(std::shared_ptr service) { service->RunSyncUpdate(); VLOG(4) << "RunServer thread end"; @@ -54,138 +40,169 @@ static void CreateTensorFromMessageType(framework::Variable *var, } } -class ListenAndServOp : public framework::OperatorBase { - public: - ListenAndServOp(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) { - if (!rpc_service_) { - std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); - server_thread_.reset(new std::thread(RunServer, rpc_service_)); - } +static void ParallelExecuteBlocks( + const std::vector ¶llel_blkids, framework::Executor *executor, + const std::vector> + &prepared, + framework::ProgramDesc *program, framework::Scope *scope) { + std::vector> fs; + for (size_t idx : parallel_blkids) { + fs.push_back( + framework::Async([&executor, &prepared, &program, &scope, idx]() { + int run_block = idx; // thread local + try { + executor->RunPreparedContext(prepared[run_block].get(), scope, + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + })); } + for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); +} + +ListenAndServOp::ListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) 
{} + +int ListenAndServOp::GetSelectedPort() { + return rpc_service_->GetSelectedPort(); +} - void Stop() override { - rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); - rpc_service_->ShutDown(); - server_thread_->join(); +void ListenAndServOp::Stop() { + rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); + server_thread_->join(); +} + +void ListenAndServOp::RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const { + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + framework::Scope &recv_scope = scope.NewScope(); + + if (!rpc_service_) { + std::string endpoint = Attr("endpoint"); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); } - void RunImpl(const framework::Scope &scope, - const platform::Place &dev_place) const override { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - framework::Scope &recv_scope = scope.NewScope(); - - // FIXME(Yancey1989): initialize rpc server with lazy mode. - rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - auto ins = Inputs("X"); - auto fan_in = Attr("Fanin"); - - auto *block = Attr(kOptimizeBlock); - auto *program = block->Program(); - int num_blocks = program->Size(); - PADDLE_ENFORCE_GE(num_blocks, 2, - "server program should have at least 2 blocks"); - - framework::Executor executor(dev_place); - - // TODO(typhoonzero): change this to a while_op for every cluster-batch. - bool exit_flag = false; - // Record received sparse variables, so that - // we could reset those after execute optimize program - std::vector sparse_vars; - while (!exit_flag) { - // Get from multiple trainers, we don't care about the order in which - // the gradients arrives, just add suffix 0~n and merge the gradient. - rpc_service_->SetCond(0); - size_t recv_var_cnt = 0; - int batch_barrier = 0; - while (batch_barrier != fan_in) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { - VLOG(3) << "recv batch barrier message"; - batch_barrier++; - continue; - } else { - VLOG(3) << "received grad: " << recv_var_name; - recv_var_cnt++; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - if (var->IsType()) { - sparse_vars.push_back(var); - } - } - } - if (exit_flag) { - rpc_service_->SetCond(1); - rpc_service_->ShutDown(); + auto ins = Inputs("X"); + auto fan_in = Attr("Fanin"); + auto *optimize_block = Attr(kOptimizeBlock); + auto *prefetch_block = Attr(kPrefetchBlock); + auto *program = optimize_block->Program(); + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + framework::Executor executor(dev_place); + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + if (blkid != prefetch_block->ID()) { + block_list.push_back(blkid); + } + } + auto optimize_prepared = executor.Prepare(*program, block_list); + // Insert placeholder for block0 which holds current op itself. 
+ optimize_prepared.insert( + optimize_prepared.begin(), + std::shared_ptr(nullptr)); + + rpc_service_->SetScope(&recv_scope); + rpc_service_->SetDevCtx(&dev_ctx); + // TODO(qiao) set proper fields for table lookup and update + rpc_service_->SetExecutor(&executor); + VLOG(3) << "prefetch block id is " << prefetch_block->ID(); + auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); + rpc_service_->SetPrefetchBlkdId(prefetch_block->ID()); + rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); + prefetch_prepared.release(); + rpc_service_->SetProgram(program); + // start the server listening after all member initialized. + server_thread_.reset(new std::thread(RunServer, rpc_service_)); + // FIXME(typhoonzero): do we need to wait until the server port is ready? + sleep(5); + + // TODO(typhoonzero): change this to a while_op for every cluster-batch. + bool exit_flag = false; + // Record received sparse variables, so that + // we could reset those after execute optimize program + std::vector sparse_vars; + while (!exit_flag) { + // Get from multiple trainers, we don't care about the order in which + // the gradients arrives, just add suffix 0~n and merge the gradient. + rpc_service_->SetCond(0); + size_t recv_var_cnt = 0; + int batch_barrier = 0; + while (batch_barrier != fan_in) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; break; - } - - // put optimize blocks in the thread pool to start run, the last block - // should be global ops. - // NOTE: if is_gpu_place, CUDA kernels are laugched by multiple threads - // and this will still work. - - std::vector> fs; - // block0 contains only listen_and_serv op, start run from block1. - for (int blkid = 1; blkid < num_blocks - 1; ++blkid) { - fs.push_back( - framework::Async([&executor, &program, &recv_scope, blkid]() { - int run_block = blkid; // thread local - try { - executor.Run(*program, &recv_scope, run_block, false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - })); - } - for (int i = 0; i < num_blocks - 2; ++i) fs[i].wait(); - // Run global block at final step, or block1 if there are only 2 blocks - if (num_blocks >= 2) { - try { - executor.Run(*program, &recv_scope, num_blocks - 1, false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); + } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "recv batch barrier message"; + batch_barrier++; + continue; + } else { + VLOG(3) << "received grad: " << recv_var_name; + recv_var_cnt++; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + if (var->IsType()) { + sparse_vars.push_back(var); } } - - // Reset the received sparse variables, the sum operator would not - // sum the input sparse variables which rows is empty at the next - // mini-batch. - // TODO(Yancey1989): move the reset action into an operator, we couldn't - // have any hide logic in the operator. - for (auto &var : sparse_vars) { - var->GetMutable()->mutable_rows()->clear(); - } + } + if (exit_flag) { rpc_service_->SetCond(1); - // FIXME(typhoonzero): use another condition to sync wait clients get. 
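ParallelExecuteBlocks above fans each prepared block out to Paddle's thread pool via framework::Async and then blocks on the returned futures. The same fork-join shape can be sketched with the standard library alone (std::async stands in for framework::Async, and the RunBlock comment marks where RunPreparedContext would go; names are illustrative):

#include <future>
#include <vector>

// Fork: launch one task per block id; join: wait for every future.
void RunBlocksInParallel(const std::vector<size_t>& block_ids) {
  std::vector<std::future<void>> futures;
  futures.reserve(block_ids.size());
  for (size_t id : block_ids) {
    futures.push_back(std::async(std::launch::async, [id] {
      // RunBlock(id);  // placeholder for executor->RunPreparedContext(...)
    }));
  }
  for (auto& f : futures) f.wait();
}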
- rpc_service_->WaitClientGet(fan_in); - sparse_vars.clear(); - } // while(true) - - // for (int i = 0; i < num_blocks; ++i) { - // delete blk_ctx_list[i]; - // } - } + rpc_service_->ShutDown(); + break; + } - protected: - std::shared_ptr rpc_service_; - std::shared_ptr server_thread_; -}; + // NOTE: if is_gpu_place, CUDA kernels are laugched by multiple threads + // and this will still work. + + // The optimize blocks which have the same parent ID would run parallel + // TODO(Yancey1989): need to use ParallelExecutor for future + int32_t last_parent_blkid = program->Block(1).Parent(); + std::vector parallel_blkids; + parallel_blkids.push_back(1); + double ts = detail::GetTimestamp(); + for (size_t blkid = 2; blkid < num_blocks; ++blkid) { + if (blkid != prefetch_block->ID()) { + if (program->Block(blkid).Parent() != last_parent_blkid) { + ParallelExecuteBlocks(parallel_blkids, &executor, optimize_prepared, + program, &recv_scope); + parallel_blkids.clear(); + last_parent_blkid = program->Block(blkid).Parent(); + } + parallel_blkids.push_back(blkid); + } + } + ParallelExecuteBlocks(parallel_blkids, &executor, optimize_prepared, + program, &recv_scope); + VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; + + // Reset the received sparse variables, the sum operator would not + // sum the input sparse variables which rows is empty at the next + // mini-batch. + // TODO(Yancey1989): move the reset action into an operator, we couldn't + // have any hide logic in the operator. + for (auto &var : sparse_vars) { + var->GetMutable()->mutable_rows()->clear(); + } + rpc_service_->SetCond(1); + // FIXME(typhoonzero): use another condition to sync wait clients get. + rpc_service_->WaitClientGet(fan_in); + sparse_vars.clear(); + } // while(true) +} class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -205,6 +222,8 @@ from send_op and send back variables to recv_op. .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr(kOptimizeBlock, "BlockID to run on server side."); + AddAttr(kPrefetchBlock, + "prefetch block to run on server side."); AddAttr("Fanin", "How many clients send to this server.") .SetDefault(1); } diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h new file mode 100644 index 0000000000000000000000000000000000000000..759b2a462ba5b938991aa86be9b9dc3e59fe3f7e --- /dev/null +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/threadpool.h" +#include "paddle/fluid/operators/detail/grpc_server.h" + +namespace paddle { +namespace operators { + +constexpr char kOptimizeBlock[] = "OptimizeBlock"; +constexpr char kPrefetchBlock[] = "PrefetchBlock"; + +void RunServer(std::shared_ptr service); + +class ListenAndServOp : public framework::OperatorBase { + public: + ListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs); + + int GetSelectedPort(); + + void Stop() override; + + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override; + + protected: + mutable std::shared_ptr rpc_service_; + mutable std::shared_ptr server_thread_; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index 99f01c2a255ade81421c2bba95ff3d38ced6f87c..bd19d8908e35e51872d324ea5aa6bb02110d5a92 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -35,7 +37,7 @@ class LoDResetKernel : public framework::OpKernel { if (lod_t->lod().size() > 0) { auto y_lod = lod_t->lod(); auto last_level = y_lod[y_lod.size() - 1]; - PADDLE_ENFORCE_EQ(last_level.back(), in->dims()[0], + PADDLE_ENFORCE_EQ((int64_t)(last_level.back()), in->dims()[0], "Last value of `Y`'s last level LoD should be equal " "to the first dimension of `X`"); out->set_lod(y_lod); diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index 6a7db31cf36f31064259abeb0348e682be9f917c..41aa00ee8ac10e0776c066fc3c37f97b0dd40cc3 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/logical_op.h" +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 50eeadab72e71f39325c5eda69e9a3c3e6517d7d..5e59bd1b178ad1803f6f70c5f3f9fd7af495ac3c 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -51,9 +51,8 @@ class LookupTableOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("W")->type()), - ctx.device_context()); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; @@ -79,12 +78,15 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { "(boolean, default false) " "Sparse update.") .SetDefault(false); + AddAttr("is_distributed", + "(boolean, default false) distributed lookup table.") + .SetDefault(false); AddAttr("padding_idx", "(int64, default -1) " "If the value is -1, it makes no effect to lookup. 
" "Otherwise the given value indicates padding the output " "with zeros whenever lookup encounters it in Ids.") - .SetDefault(-1); + .SetDefault(kNoPadding); AddComment(R"DOC( Lookup Table Operator. @@ -124,9 +126,8 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("W")->type()), - ctx.device_context()); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index c92ce78eeffb8f1517e61c6d6624d406e04d974d..cb088c267bcc028ff11583cd73de5ca1722a9b69 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -14,6 +14,9 @@ limitations under the License. */ #pragma once +#include +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -25,16 +28,33 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; +using DDim = framework::DDim; + +constexpr int64_t kNoPadding = -1; template class LookupTableKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - auto* table_t = context.Input("W"); - auto* ids_var = context.InputVar("Ids"); - Tensor* output_t = context.Output("Out"); + void Compute(const framework::ExecutionContext &context) const override { + auto *table_var = context.InputVar("W"); + auto *ids_var = context.InputVar("Ids"); + Tensor *output_t = context.Output("Out"); + int64_t padding_idx = context.Attr("padding_idx"); + + DDim table_dim; - int64_t* ids; + if (table_var->IsType()) { + table_dim = context.Input("W")->dims(); + } else if (table_var->IsType()) { + auto *table_t = context.Input("W"); + table_dim = table_t->value().dims(); + } else { + PADDLE_THROW( + "The parameter W of a LookupTable " + "must be either LoDTensor or SelectedRows"); + } + + int64_t *ids; int64_t ids_numel; // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type @@ -42,39 +62,50 @@ class LookupTableKernel : public framework::OpKernel { // when Ids's type is SelectedRows, the rows of Ids contains the // ids to be looked up in W. 
if (ids_var->IsType()) { - auto* ids_t = context.Input("Ids"); - ids = const_cast(ids_t->data()); + auto *ids_t = context.Input("Ids"); + ids = const_cast(ids_t->data()); ids_numel = ids_t->numel(); } else if (ids_var->IsType()) { - auto* ids_t = context.Input("Ids"); - ids = const_cast(ids_t->rows().data()); + auto *ids_t = context.Input("Ids"); + ids = const_cast(ids_t->rows().data()); ids_numel = ids_t->rows().size(); - output_t->Resize({ids_numel, table_t->dims()[1]}); + output_t->Resize({ids_numel, table_dim[1]}); } else { PADDLE_THROW("Unsupported Variable Type of Ids"); } - int64_t padding_idx = context.Attr("padding_idx"); + if (table_var->IsType()) { + auto *table_t = context.Input("W"); + int64_t row_number = table_t->dims()[0]; + int64_t row_width = table_t->dims()[1]; - int N = table_t->dims()[0]; - int D = table_t->dims()[1]; - auto* table = table_t->data(); - auto* output = output_t->mutable_data(context.GetPlace()); + auto *table = table_t->data(); + auto *output = output_t->mutable_data(context.GetPlace()); - if (padding_idx == -1) { for (int64_t i = 0; i < ids_numel; ++i) { - PADDLE_ENFORCE_LT(ids[i], N); - PADDLE_ENFORCE_GE(ids[i], 0); - memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); + if (padding_idx != kNoPadding && ids[i] == padding_idx) { + memset(output + i * row_width, 0, row_width * sizeof(T)); + } else { + PADDLE_ENFORCE_LT(ids[i], row_number); + PADDLE_ENFORCE_GE(ids[i], 0); + memcpy(output + i * row_width, table + ids[i] * row_width, + row_width * sizeof(T)); + } } - } else { + } else if (table_var->IsType()) { + const auto &table_t = table_var->Get(); + int64_t row_width = table_t.value().dims()[1]; + const auto *table = table_t.value().data(); + auto *output = output_t->mutable_data(context.GetPlace()); + for (int64_t i = 0; i < ids_numel; ++i) { - if (ids[i] == padding_idx) { - memset(output + i * D, 0, D * sizeof(T)); + if (padding_idx != kNoPadding && ids[i] == padding_idx) { + memset(output + i * row_width, 0, row_width * sizeof(T)); } else { - PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); - memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); + auto id_index = table_t.index(ids[i]); + memcpy(output + i * row_width, table + id_index * row_width, + row_width * sizeof(T)); } } } @@ -84,17 +115,29 @@ class LookupTableKernel : public framework::OpKernel { template class LookupTableGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { + void Compute(const framework::ExecutionContext &context) const override { + auto *table_var = context.InputVar("W"); + DDim table_dim; + if (table_var->IsType()) { + table_dim = context.Input("W")->dims(); + } else if (table_var->IsType()) { + auto *table_t = context.Input("W"); + table_dim = table_t->value().dims(); + } else { + PADDLE_THROW( + "The parameter W of a LookupTable " + "must be either LoDTensor or SelectedRows"); + } + bool is_sparse = context.Attr("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
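 // A note on the sparse branch below: the gradient is emitted as a
 // SelectedRows whose rows are exactly the looked-up ids, so only the
 // touched rows of W receive updates; the dense branch instead zeroes a
 // full-sized gradient buffer and accumulates into it.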
if (is_sparse) { - auto* ids = context.Input("Ids"); - auto* table = context.Input("W"); - auto* d_output = context.Input(framework::GradVarName("Out")); - auto* d_table = context.Output(framework::GradVarName("W")); + auto *ids = context.Input("Ids"); + auto *d_output = context.Input(framework::GradVarName("Out")); + auto *d_table = context.Output(framework::GradVarName("W")); - auto* ids_data = ids->data(); + auto *ids_data = ids->data(); auto ids_dim = ids->dims(); framework::Vector new_rows; @@ -104,31 +147,30 @@ class LookupTableGradKernel : public framework::OpKernel { } d_table->set_rows(new_rows); - auto* d_table_value = d_table->mutable_value(); - d_table_value->Resize({ids_dim[0], table->dims()[1]}); + auto *d_table_value = d_table->mutable_value(); + d_table_value->Resize({ids_dim[0], table_dim[1]}); d_table_value->mutable_data(context.GetPlace()); - d_table->set_height(table->dims()[0]); + d_table->set_height(table_dim[0]); - auto* d_output_data = d_output->data(); - auto* d_table_data = d_table_value->data(); + auto *d_output_data = d_output->data(); + auto *d_table_data = d_table_value->data(); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { - auto* ids = context.Input("Ids"); - auto* d_output = context.Input(framework::GradVarName("Out")); - auto* d_table = context.Output(framework::GradVarName("W")); - auto* table = context.Input("W"); + auto *ids = context.Input("Ids"); + auto *d_output = context.Input(framework::GradVarName("Out")); + auto *d_table = context.Output(framework::GradVarName("W")); - auto* ids_data = ids->data(); + auto *ids_data = ids->data(); auto ids_dim = ids->dims(); - int N = table->dims()[0]; + int N = table_dim[0]; int D = d_output->dims()[1]; - auto* d_output_data = d_output->data(); - auto* d_table_data = d_table->mutable_data(context.GetPlace()); + auto *d_output_data = d_output->data(); + auto *d_table_data = d_table->mutable_data(context.GetPlace()); memset(d_table_data, 0, d_table->numel() * sizeof(T)); diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index b36b5c3a339bd7e534bcc3eb7a2efef313cb2a5d..553a06c3dcdbb9de43afcace75ebec7c5e819d4a 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lrn_op.h" +#include #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif @@ -214,7 +215,10 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker { "Defaults to \"NHWC\". Specify the data format of the output data, " "the input will be transformed automatically. ") .SetDefault("AnyLayout"); - AddAttr("is_test", "").SetDefault(false); + AddAttr("is_test", + "Turns on memory optimization that optimizes away " + "unnecessary memory allocations. Used by MKLDNN.") + .SetDefault(false); AddComment(R"DOC( Local Response Normalization Operator. 
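For context, the across-channel LRN this operator implements is conventionally written as follows (a sketch of the standard AlexNet-style formulation; whether the alpha term is additionally divided by the window size n varies between implementations):

$$ b^{i}_{x,y} = a^{i}_{x,y} \Big/ \Big( k + \alpha \sum_{j=\max(0,\, i-n/2)}^{\min(C-1,\, i+n/2)} \big( a^{j}_{x,y} \big)^{2} \Big)^{\beta} $$

where a is the input, b the output, C the number of channels, and k, alpha, beta, n correspond to the operator's attributes of the same names.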
diff --git a/paddle/fluid/operators/lrn_op.h b/paddle/fluid/operators/lrn_op.h index 95796f7eecd2bcd61aab7944f557ca568b03e027..0fd3175e8579df9e61368cc151a94fa45e433884 100644 --- a/paddle/fluid/operators/lrn_op.h +++ b/paddle/fluid/operators/lrn_op.h @@ -121,6 +121,10 @@ class LRNGradKernel : public framework::OpKernel { T alpha = ctx.Attr("alpha"); T beta = ctx.Attr("beta"); + PADDLE_ENFORCE( + !ctx.Attr("is_test"), + "is_test attribute should be set to False in training phase."); + LRNGradFunctor f; f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta); } diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index d75537741ef1d13b61ad6e244b2bba1ae5509da5..e062d62c66c25e386c7643e310034bc1481ec43d 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lstm_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 11f9f223b5d9a8091c51c93cee3f9c23b62e5573..a1ef0eb278dea7205cd8052bbe006b0ae4e3a466 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu index 76245a1b5a9c8ba9c7ee7d7c03a95e2595a01591..acf094238fff92711edf00b4180266138362add1 100644 --- a/paddle/fluid/operators/lstm_unit_op.cu +++ b/paddle/fluid/operators/lstm_unit_op.cu @@ -18,6 +18,7 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.c #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/cross_entropy_op.h" +#include "paddle/fluid/operators/lstm_unit_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index a881ef82ec3cefa826f5f0856cc4fc13c7d7afc0..82541517e122d5da2674b55561ba72af970a2567 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lstmp_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index dfa7f74d5116b4e3f1508f8bef94c598711e8124..172db548960135fbc1841cf58b73894d4f74d838 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/
 #pragma once
+#include
 #include "paddle/fluid/operators/activation_op.h"
 #include "paddle/fluid/operators/math/detail/activation_functions.h"
 #include "paddle/fluid/operators/math/lstm_compute.h"
diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h
index 22147d79e4b1eeee76f7445dd963bf5062049a34..c0e983e4aa7abbdd87649f5a3147d2a464993bce 100644
--- a/paddle/fluid/operators/math/concat.h
+++ b/paddle/fluid/operators/math/concat.h
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
+#include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/tensor.h"
 namespace paddle {
diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc
index 299a0aed01dfe0448d896738d9fd33319b1b2887..44fd739fb1d161c6c7d6ab1cc611c59220280a4e 100644
--- a/paddle/fluid/operators/math/math_function.cc
+++ b/paddle/fluid/operators/math/math_function.cc
@@ -322,6 +322,14 @@ void set_constant_with_place(
 TensorSetConstantCPU(tensor, value));
 }
+template <>
+void set_constant_with_place(
+ const platform::DeviceContext& context, framework::Tensor* tensor,
+ float value) {
+ framework::VisitDataType(framework::ToDataType(tensor->type()),
+ TensorSetConstantCPU(tensor, value));
+}
+
 struct TensorSetConstantWithPlace : public boost::static_visitor {
 TensorSetConstantWithPlace(const platform::DeviceContext& context,
 framework::Tensor* tensor, float value)
diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu
index 1e909db5288afccb9dd0be08a45cf3c27048ae6f..9badf26c9bb80acad029be3d1b63377cef63d929 100644
--- a/paddle/fluid/operators/math/math_function.cu
+++ b/paddle/fluid/operators/math/math_function.cu
@@ -39,18 +39,46 @@ void gemm(
 cublasOperation_t cuTransB =
 (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
+ // TODO(kexinzhao): add processing code for compute capability < 53 case
+ PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53,
+ "cublas fp16 gemm requires GPU compute capability >= 53");
+
+#if CUDA_VERSION >= 8000
+ float h_alpha = static_cast(alpha);
+ float h_beta = static_cast(beta);
+
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT;
+#if CUDA_VERSION >= 9000
+ if (context.GetComputeCapability() >= 70) {
+ PADDLE_ENFORCE(platform::dynload::cublasSetMathMode(context.cublas_handle(),
+ CUBLAS_TENSOR_OP_MATH));
+ algo = CUBLAS_GEMM_DFALT_TENSOR_OP;
+ } else {
+ PADDLE_ENFORCE(platform::dynload::cublasSetMathMode(context.cublas_handle(),
+ CUBLAS_DEFAULT_MATH));
+ }
+#endif // CUDA_VERSION >= 9000
+
+ // cublasHgemm does true FP16 computation, which is slow for non-Volta
+ // GPUs. So use cublasGemmEx instead, which does pseudo FP16 computation:
+ // input/output in fp16, computation in fp32, which can also be accelerated
+ // using tensor cores on Volta GPUs.
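+ // Concretely, in the cublasGemmEx call below the A/B/C matrices are
+ // described as CUDA_R_16F while the computeType is CUDA_R_32F, so the
+ // accumulation runs in fp32; on sm_70+ the CUBLAS_GEMM_DFALT_TENSOR_OP
+ // algo chosen above additionally routes the math through tensor cores.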
+ PADDLE_ENFORCE(platform::dynload::cublasGemmEx( + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, B, + CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &h_beta, C, CUDA_R_16F, N, + CUDA_R_32F, algo)); +#else + // CUDA 7.5 does not support cublasGemmEx, hence we fall back to use hgemm const half h_alpha = static_cast(alpha); const half h_beta = static_cast(beta); const half* h_A = reinterpret_cast(A); const half* h_B = reinterpret_cast(B); half* h_C = reinterpret_cast(C); - // TODO(kexinzhao): add processing code for compute capability < 53 case - PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53, - "cublas Hgemm requires GPU compute capability >= 53"); PADDLE_ENFORCE(platform::dynload::cublasHgemm( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb, h_A, lda, &h_beta, h_C, N)); +#endif // CUDA_VERSION >= 8000 } template <> @@ -240,6 +268,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, float16* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -260,9 +289,13 @@ void batched_gemm( // TODO(kexinzhao): add processing code for compute capability < 53 case PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53, "cublas Hgemm requires GPU compute capability >= 53"); + PADDLE_ENFORCE(platform::dynload::cublasHgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb, strideB, h_A, lda, strideA, &h_beta, h_C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "HgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> @@ -271,6 +304,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -285,6 +319,9 @@ void batched_gemm( PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "SgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> @@ -293,6 +330,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? 
K : M; @@ -307,6 +345,9 @@ void batched_gemm( PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "DgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index f7a6f2bdf4e3b7896df39acfa51fa20577b20f3b..5ae42ab973c81d3794fbbbe088e37ab02168c8dc 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -19,8 +19,17 @@ namespace paddle { namespace operators { namespace math { +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +template +using EigenVector = framework::EigenVector; +template +using EigenMatrix = framework::EigenMatrix; + template -class MaxSeqPoolFunctor { +class MaxSeqPoolFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, @@ -60,7 +69,7 @@ class MaxSeqPoolFunctor { }; template -class MaxSeqPoolGradFunctor { +class MaxSeqPoolGradFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& out_grad, @@ -93,10 +102,101 @@ class MaxSeqPoolGradFunctor { } }; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolGradFunctor; -template class MaxSeqPoolGradFunctor; +template +class SequencePoolFunctor { + public: + /* max pool has index output */ + void operator()(const platform::CPUDeviceContext& context, + const std::string pooltype, const framework::LoDTensor& input, + framework::Tensor* output, + framework::Tensor* index = nullptr) { + if (pooltype == "MAX") { + math::MaxSeqPoolFunctor max_pool; + max_pool(context, input, output, index); + return; + } + auto lod = input.lod()[0]; + auto& place = *context.eigen_device(); + for (int i = 0; i < static_cast(lod.size()) - 1; ++i) { + Tensor in_t = + input.Slice(static_cast(lod[i]), static_cast(lod[i + 1])); + Tensor out_t = output->Slice(i, i + 1); + int64_t h = static_cast(lod[i + 1] - lod[i]); + int64_t w = input.numel() / input.dims()[0]; + auto in_e = EigenMatrix::From(in_t, framework::make_ddim({h, w})); + auto out_e = EigenVector::Flatten(out_t); + if (pooltype == "AVERAGE") { + out_e.device(place) = in_e.mean(Eigen::array({{0}})); + } else if (pooltype == "SUM") { + out_e.device(place) = in_e.sum(Eigen::array({{0}})); + } else if (pooltype == "SQRT") { + out_e.device(place) = in_e.sum(Eigen::array({{0}})) / + std::sqrt(static_cast(h)); + } else if (pooltype == "LAST") { + out_e.device(place) = in_e.chip(h - 1, 0); + } else if (pooltype == "FIRST") { + out_e.device(place) = in_e.chip(0, 0); + } else { + PADDLE_THROW("unsupported pooling pooltype"); + } + } + } +}; + +template +class SequencePoolGradFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + const std::string pooltype, const framework::Tensor& out_grad, + framework::LoDTensor* in_grad, + /* max pool has index */ + const framework::Tensor* index = nullptr) { + if (pooltype == "MAX") { + math::MaxSeqPoolGradFunctor max_pool_grad; + max_pool_grad(context, out_grad, *index, in_grad); + return; + } + + if (pooltype == "LAST" || pooltype == "FIRST") { + // set X@Grad be zero at first when pooltype is LAST/FIRST + math::SetConstant functor; + functor(context, in_grad, 0); + } + auto lod = 
in_grad->lod()[0]; + auto& place = *context.eigen_device(); + for (int i = 0; i < static_cast(lod.size()) - 1; ++i) { + auto in_g_t = in_grad->Slice(static_cast(lod[i]), + static_cast(lod[i + 1])); + auto out_g_t = out_grad.Slice(i, i + 1); + int64_t h = static_cast(lod[i + 1] - lod[i]); + int64_t w = in_grad->numel() / in_grad->dims()[0]; + auto in_g_e = EigenMatrix::From(in_g_t, {h, w}); + auto out_g_e = EigenMatrix::From(out_g_t, {1, w}); + auto out_g_e_v = EigenVector::Flatten(out_g_t); + Eigen::DSizes bcast(h, 1); + + if (pooltype == "AVERAGE") { + in_g_e.device(place) = (out_g_e / static_cast(h)).broadcast(bcast); + } else if (pooltype == "SUM") { + in_g_e.device(place) = (out_g_e).broadcast(bcast); + } else if (pooltype == "SQRT") { + in_g_e.device(place) = + (out_g_e / std::sqrt(static_cast(h))).broadcast(bcast); + } else if (pooltype == "LAST") { + in_g_e.chip(h - 1, 0).device(place) = out_g_e_v; + } else if (pooltype == "FIRST") { + in_g_e.chip(0, 0).device(place) = out_g_e_v; + } else { + PADDLE_THROW("unsupported pooling pooltype"); + } + } + } +}; + +template class SequencePoolFunctor; +template class SequencePoolFunctor; +template class SequencePoolGradFunctor; +template class SequencePoolGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index d61407c020142f046f41f71a56702fd6106df628..1935364da37e9a9881651455d2da4ecef1b1e266 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { @@ -22,113 +23,331 @@ namespace math { #define FLT_MAX __FLT_MAX__ template -__global__ void KeMaxSequencePool(const T* input, const size_t* starts, - T* output, int* index, int64_t num_seq, - int64_t dim) { - int dim_idx = threadIdx.x; - int seq_id = blockIdx.x; - if (seq_id >= num_seq) return; - size_t start = starts[seq_id]; - size_t end = starts[seq_id + 1]; - - for (int64_t i = dim_idx; i < dim; i += blockDim.x) { - T max_val = static_cast(-FLT_MAX); - int max_id = -1; - for (size_t step_id = start; step_id < end; step_id++) { - if (max_val < input[step_id * dim + i]) { - max_val = input[step_id * dim + i]; - max_id = step_id; +struct MaxPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + T max_val = static_cast(-FLT_MAX); + int max_index = -1; + for (int i = start; i < end; ++i) { + if (max_val < input[item_dim * i + tid]) { + max_val = input[item_dim * i + tid]; + max_index = i; + } } + output[tid] = max_val; + index[tid] = max_index; } - output[seq_id * dim + i] = max_val; - index[seq_id * dim + i] = max_id; } -} +}; template -class MaxSeqPoolFunctor { - public: - void operator()(const platform::CUDADeviceContext& context, - const framework::LoDTensor& input, framework::Tensor* output, - framework::Tensor* index) { - auto in_dims = input.dims(); - auto out_dims = output->dims(); - auto idx_dims = index->dims(); - PADDLE_ENFORCE_GT(in_dims.size(), static_cast(1)); - PADDLE_ENFORCE_GT(out_dims.size(), 1); - for (int64_t i = 1; i < in_dims.size(); ++i) { - PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); +struct 
AvgPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + T val = static_cast(0); + for (int i = start; i < end; ++i) { + val += input[item_dim * i + tid]; + } + // end, start is lod, so end - start != 0 + output[tid] = val / static_cast(end - start); } - PADDLE_ENFORCE_EQ(idx_dims, out_dims); + } +}; - auto starts = input.lod()[0]; - const T* in_data = input.data(); - T* out_data = output->data(); - int* max_index = index->data(); +template +struct SumPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + T val = static_cast(0); + for (int i = start; i < end; ++i) { + val += input[item_dim * i + tid]; + } + output[tid] = val; + } + } +}; - int64_t num_seq = out_dims[0]; - int64_t dim = output->numel() / num_seq; +template +struct SqrtPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + T val = static_cast(0); + for (int i = start; i < end; ++i) { + val += input[item_dim * i + tid]; + } + // end, start is lod, so end - start != 0 + output[tid] = val / sqrt(end - start); + } + } +}; - dim3 threads(256, 1); - dim3 grid(num_seq, 1); - auto stream = context.stream(); - KeMaxSequencePool<<>>( - in_data, starts.CUDAData(context.GetPlace()), out_data, max_index, - num_seq, dim); +template +struct LastPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + output[tid] = input[item_dim * (end - 1) + tid]; + } } }; template -__global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index, - T* in_grad, int64_t num_seq, - int64_t dim) { - int idx = threadIdx.x + blockIdx.x * blockDim.x; - int col_idx = idx % dim; - if (idx < num_seq * dim) { - int step_id = max_index[idx]; - in_grad[step_id * dim + col_idx] = out_grad[idx]; +struct FirstPoolFunctor { + HOSTDEVICE void operator()(const T* input, const size_t start, + const size_t end, const size_t item_dim, T* output, + int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + output[tid] = input[item_dim * start + tid]; + } } +}; + +template +__global__ void sequence_pool_kernel(Range_OP op, const T* input, + const size_t* lod, const size_t lod_size, + const size_t item_dim, T* output, + int* index) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + size_t start = lod[bid]; + size_t end = lod[bid + 1]; + int* index_offset = nullptr; + if (index != nullptr) { + index_offset = &index[bid * item_dim]; + } + op(input, start, end, item_dim, &output[bid * item_dim], index_offset); } template -class MaxSeqPoolGradFunctor { +class SequencePoolFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& out_grad, - const framework::Tensor& index, - framework::LoDTensor* in_grad) { - auto og_dims = out_grad.dims(); - auto idx_dims = index.dims(); - auto ig_dims = in_grad->dims(); - PADDLE_ENFORCE_GT(og_dims.size(), static_cast(1)); - PADDLE_ENFORCE_GT(ig_dims.size(), static_cast(1)); - for (int64_t i = 1; i < og_dims.size(); 
++i) { - PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]); + const std::string pooltype, const framework::LoDTensor& input, + framework::Tensor* output, + framework::Tensor* index = nullptr) { + auto lod = input.lod()[0]; + const size_t item_dim = output->numel() / output->dims()[0]; + dim3 threads(1024, 1); + dim3 grid(lod.size(), 1); + if (pooltype == "MAX") { + sequence_pool_kernel< + T, MaxPoolFunctor><<>>( + MaxPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), index->data()); + } else if (pooltype == "AVERAGE") { + sequence_pool_kernel< + T, AvgPoolFunctor><<>>( + AvgPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "SUM") { + sequence_pool_kernel< + T, SumPoolFunctor><<>>( + SumPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "SQRT") { + sequence_pool_kernel< + T, SqrtPoolFunctor><<>>( + SqrtPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "LAST") { + sequence_pool_kernel< + T, LastPoolFunctor><<>>( + LastPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "FIRST") { + sequence_pool_kernel< + T, FirstPoolFunctor><<>>( + FirstPoolFunctor(), input.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + output->mutable_data(context.GetPlace()), nullptr); + } else { + PADDLE_THROW("unsupported pooling pooltype"); } - PADDLE_ENFORCE_EQ(idx_dims, og_dims); + } +}; - const T* og_data = out_grad.data(); - const int* max_index = index.data(); - T* ig_data = in_grad->data(); +template +struct MaxPoolGradFunctor { + HOSTDEVICE void operator()(const T* out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + if (i == index[tid]) { + in_grad[item_dim * i + tid] = out_grad[tid]; + } else { + in_grad[item_dim * i + tid] = static_cast(0); + } + } + } + } +}; - SetConstant set_zero; - set_zero(context, in_grad, static_cast(0.0)); - int64_t num_seq = og_dims[0]; - int64_t dim = out_grad.numel() / num_seq; +template +struct AvgPoolGradFunctor { + HOSTDEVICE void operator()(const T* out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + in_grad[item_dim * i + tid] = out_grad[tid] / (end - start); + } + } + } +}; - unsigned int blocks = (num_seq * dim + 128 - 1) / 128; - dim3 threads(128, 1); - dim3 grid(blocks, 1); - auto stream = context.stream(); - KeMaxSequencePoolGrad<<>>( - og_data, max_index, ig_data, num_seq, dim); +template +struct SumPoolGradFunctor { + HOSTDEVICE void operator()(const T* out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + in_grad[item_dim * i + tid] = out_grad[tid]; + } + } + } +}; + +template +struct SqrtPoolGradFunctor { + HOSTDEVICE void operator()(const T* 
out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + in_grad[item_dim * i + tid] = + out_grad[tid] / (sqrt(static_cast(end - start))); + } + } + } +}; + +template +struct LastPoolGradFunctor { + HOSTDEVICE void operator()(const T* out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + if (i == end - 1) { + in_grad[item_dim * i + tid] = out_grad[tid]; + } else { + in_grad[item_dim * i + tid] = static_cast(0); + } + } + } + } +}; + +template +struct FirstPoolGradFunctor { + HOSTDEVICE void operator()(const T* out_grad, const size_t start, + const size_t end, const size_t item_dim, + T* in_grad, const int* index) { + for (int tid = threadIdx.x; tid < item_dim; tid += blockDim.x) { + for (int i = start; i < end; ++i) { + if (i == start) { + in_grad[item_dim * i + tid] = out_grad[tid]; + } else { + in_grad[item_dim * i + tid] = static_cast(0); + } + } + } + } +}; + +template +__global__ void sequence_pool_grad_kernel(Range_OP op, const T* out_grad, + const size_t* lod, + const size_t lod_size, + const size_t item_dim, T* in_grad, + const int* index) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + size_t start = lod[bid]; + size_t end = lod[bid + 1]; + const int* index_offset = nullptr; + if (index != nullptr) { + index_offset = &index[bid * item_dim]; + } + op(&out_grad[bid * item_dim], start, end, item_dim, in_grad, index_offset); +} + +template +class SequencePoolGradFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const std::string pooltype, const framework::Tensor& out_grad, + framework::LoDTensor* in_grad, + /* max pool has index */ + const framework::Tensor* index = nullptr) { + auto lod = in_grad->lod()[0]; + const size_t item_dim = in_grad->numel() / in_grad->dims()[0]; + dim3 threads(1024, 1); + dim3 grid(lod.size(), 1); + if (pooltype == "MAX") { + sequence_pool_grad_kernel< + T, MaxPoolGradFunctor><<>>( + MaxPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), index->data()); + } else if (pooltype == "AVERAGE") { + sequence_pool_grad_kernel< + T, AvgPoolGradFunctor><<>>( + AvgPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "SUM") { + sequence_pool_grad_kernel< + T, SumPoolGradFunctor><<>>( + SumPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "SQRT") { + sequence_pool_grad_kernel< + T, SqrtPoolGradFunctor><<>>( + SqrtPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "LAST") { + sequence_pool_grad_kernel< + T, LastPoolGradFunctor><<>>( + LastPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), nullptr); + } else if (pooltype == "FIRST") { + sequence_pool_grad_kernel< + T, FirstPoolGradFunctor><<>>( + FirstPoolGradFunctor(), out_grad.data(), + lod.CUDAData(context.GetPlace()), 
lod.size(), item_dim, + in_grad->mutable_data(context.GetPlace()), nullptr); + + } else { + PADDLE_THROW("unsupported pooling pooltype"); + } } }; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolGradFunctor; -template class MaxSeqPoolGradFunctor; +// sequence pooling +template class SequencePoolFunctor; +template class SequencePoolFunctor; +template class SequencePoolGradFunctor; +template class SequencePoolGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index ecb76884f670df1aee64ed65c3bb0cf09c5beaff..38e780222955644c14e5bbbf16dee720c7758f5c 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -21,23 +21,23 @@ namespace paddle { namespace operators { namespace math { -#define FLT_MAX __FLT_MAX__ - template -class MaxSeqPoolFunctor { +class SequencePoolFunctor { public: - void operator()(const DeviceContext& context, + /* max pool has index output */ + void operator()(const DeviceContext& context, const std::string pooltype, const framework::LoDTensor& input, framework::Tensor* output, - framework::Tensor* index); + framework::Tensor* index = nullptr); }; -template -class MaxSeqPoolGradFunctor { +template +class SequencePoolGradFunctor { public: - void operator()(const DeviceContext& context, + void operator()(const DeviceContext& context, const std::string pooltype, const framework::Tensor& out_grad, - const framework::Tensor& index, - framework::LoDTensor* in_grad); + framework::LoDTensor* in_grad, + /* max pool has index */ + const framework::Tensor* index = nullptr); }; } // namespace math diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 5518ebed3f792a5acdfbb27976bc2c6dbd78069a..a579182ec1bd5d10d95bbf8c6f5a0e70ceaaaf4b 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -14,6 +14,8 @@ limitations under the License. */ #define EIGEN_USE_GPU +#include + #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/softmax.h" #include "paddle/fluid/operators/math/softmax_impl.h" @@ -95,6 +97,7 @@ template class SoftmaxCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; +template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxGradFunctor; diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 3e123f7bf5512618538fd35aa7e74b82586a5448..dd9971ba091cc3ece86654f65c335b98087f45ed 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -27,7 +27,7 @@ using EigenMatrix = framework::EigenMatrix; template struct ValueClip { HOSTDEVICE T operator()(const T& x) const { - const T kThreshold = -64.; + const T kThreshold = static_cast(-64.); return x < kThreshold ? kThreshold : x; } }; diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index 85855928521b8b4cc5e8746b0b5f841cc2587618..1f5255887391218b766aa23842e443c8b2ad080f 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/matmul_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h index 1cd8fe55dcbd23eae771550a363bf0a07e9bf585..f2e9cfdcdbf93326ae193776a7d5f6a324373603 100644 --- a/paddle/fluid/operators/matmul_op.h +++ b/paddle/fluid/operators/matmul_op.h @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/matmul.h" diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index efaae7d5f2d20484d90f79d9e13ec2f5ed6e06c9..4e28d98834d27351be99106d6760eae46baf8938 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -13,6 +13,8 @@ * limitations under the License. */ #include "paddle/fluid/operators/maxout_op.h" +#include + namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 7de9d94979fdc3f3352c556cc8b655ad4bc7e201..a302b24560e680076d62d02b422c6410467deb1d 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/minus_op.h" -#include "paddle/fluid/operators/net_op.h" + +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu index da4a6af298f61a20e60ff1b8358f30bb0aca2280..5eb9d9950248bb50bb823f071c7fff0ddcc47234 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/momentum_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index 90af1e2d602ac039b4d98a69a889ff8b1b85ffc6..5038287527c70d376d8c8a1cc8e4cca0b563126a 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/mul_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 90f6f955cea51ded2dbb2bde459113458d7749a4..20b8a5c98ab16ac8121cb2fd01deb8ecc1966d44 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include -#include -#include +#include // NOLINT +#include // NOLINT #include #include "paddle/fluid/framework/init.h" @@ -43,7 +43,7 @@ const f::DDim kDims = {20, 20}; // nccl op common tester, init communicator. 
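// Note on the fixture below: SetUp creates one device context per visible
// GPU and runs NCCLInitOp, logging a warning when fewer than two devices
// are available; TearDown deletes the per-device contexts again.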
class NCCLTester : public ::testing::Test { public: - virtual void SetUp() override { + void SetUp() override { int count = p::GetCUDADeviceCount(); if (count <= 1) { LOG(WARNING) @@ -64,7 +64,7 @@ class NCCLTester : public ::testing::Test { NCCLInitOp(); } - virtual void TearDown() override { + void TearDown() override { for (auto &device_context : dev_ctxs_) { delete device_context; } @@ -137,6 +137,8 @@ class NCCLTester : public ::testing::Test { TEST_F(NCCLTester, ncclInitOp) {} // ncclAllReduceOp with desc +// TODO(helin): https://github.com/PaddlePaddle/Paddle/issues/9367 +/* TEST_F(NCCLTester, ncclAllReduceOp) { std::unique_ptr op2(new f::OpDesc); op2->SetType("ncclAllReduce"); @@ -184,6 +186,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) { } } } +*/ // ncclReduceOp with desc TEST_F(NCCLTester, ncclReduceOp) { @@ -236,6 +239,8 @@ TEST_F(NCCLTester, ncclReduceOp) { } // ncclBcastOp with desc +// TODO(helin): https://github.com/PaddlePaddle/Paddle/issues/9540 +/* TEST_F(NCCLTester, ncclBcastOp) { std::unique_ptr op2(new f::OpDesc); const int kRoot = 0; @@ -281,3 +286,4 @@ TEST_F(NCCLTester, ncclBcastOp) { ASSERT_NEAR(ct[j], result, 1e-5); } } +*/ diff --git a/paddle/fluid/operators/net_op.cc b/paddle/fluid/operators/net_op.cc deleted file mode 100644 index 0c2da744177b602246719d701257fc1b509ad81e..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op.cc +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/operators/net_op.h" -#include -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -const char NetOp::kAll[] = "all"; - -void NetOp::CompleteAddOp(bool calc) { - add_op_done_ = true; - if (!calc) return; - std::set input_set; - std::set output_set; - for (auto& op : ops_) { - for (auto& ipt : op->Inputs()) { - for (auto& var_name : ipt.second) { - // If input variable has been in output set, then it will be - // added into intermediate_outputs_. Otherwise, it will be - // added into input set. 
- if (Contains(output_set, var_name)) { - intermediate_outputs_.insert(var_name); - } else { - input_set.insert(var_name); - } - } - } - - for (auto& opt : op->Outputs()) { - for (auto& var_name : opt.second) { - output_set.insert(var_name); - } - } - } - auto& inputs = inputs_[kAll]; - inputs.reserve(input_set.size()); - std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs)); - auto& outputs = outputs_[kAll]; - outputs.reserve(output_set.size()); - std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs)); -} - -std::string NetOp::DebugStringEx(const framework::Scope* scope) const { - std::ostringstream os; - os << OperatorBase::DebugStringEx(scope) << std::endl; - for (auto& op : ops_) { - std::istringstream is(op->DebugStringEx(scope)); - for (std::string line; std::getline(is, line);) { - os << " " << line << std::endl; - } - } - return os.str(); -} - -bool NetOp::IsNetOp() const { return true; } - -std::vector NetOp::OutputVars(bool has_intermediate) const { - std::vector all; - for (auto& pair : this->outputs_) { - for (auto& var_name : pair.second) { - all.push_back(var_name); - } - } - if (has_intermediate) { - return all; - } - std::vector ret_val; - for (auto& each : all) { - if (!Contains(intermediate_outputs_, each)) { - ret_val.push_back(each); - } - } - return ret_val; -} - -NetOp::NetOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : framework::OperatorBase(type, inputs, outputs, attrs) {} - -std::unique_ptr NetOp::Clone() const { - PADDLE_ENFORCE( - add_op_done_, - "Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone"); - return std::unique_ptr(new NetOp(*this)); -} - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/net_op.h b/paddle/fluid/operators/net_op.h deleted file mode 100644 index cbf8820cf4991bc24893f13646364dea0955a128..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op.h +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -/** - * @brief Network is also a type of Operator - * - * It will manage the operators it has. - * - * Network is the container and controller of a set of operators. - - * A network object knows all Operators belonging to this network. Variables, - * which are inputs and outputs of these operators, are created and managed by a - * hierarchy of Scope objects. - * - * This is the base class of network, all the networks should implement the APIs - * it defines. 
- */ -class NetOp : public framework::OperatorBase { - public: - static const char kAll[]; - NetOp() - : framework::OperatorBase("plain_net", framework::VariableNameMap{}, - framework::VariableNameMap{}, - framework::AttributeMap{}) {} - - NetOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs); - - NetOp(const NetOp& o) : framework::OperatorBase(o.type_, {}, {}, o.attrs_) { - this->ops_.reserve(o.ops_.size()); - std::transform( - o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), - [](const std::unique_ptr& op) { - return std::unique_ptr(op->Clone()); - }); - this->CompleteAddOp(); - } - - bool SupportGPU() const override { - for (auto& op : ops_) { - if (!op->SupportGPU()) { - return false; - } - } - return true; - } - - void AppendOp(const framework::OperatorBase& op) { AppendOp(op.Clone()); } - - /** - * @brief Add an operator by ptr - */ - void AppendOp(std::unique_ptr op) { - PADDLE_ENFORCE(!add_op_done_, - "Cannot AppendOp when this network is sealed"); - PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - ops_.push_back(std::move(op)); - } - - void InsertOp(size_t pos, std::unique_ptr op) { - PADDLE_ENFORCE(!add_op_done_, - "Cannot InsertOp when this network is sealed"); - PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); - ops_.insert(ops_.begin() + pos, std::move(op)); - } - - void InsertOp(size_t pos, const framework::OperatorBase& op) { - InsertOp(pos, op.Clone()); - } - - void CompleteAddOp(bool calculate = true); - - std::string DebugStringEx( - const framework::Scope* scope = nullptr) const override; - - bool IsNetOp() const override; - std::vector OutputVars(bool has_intermediate) const override; - - std::unique_ptr Clone() const override; - - std::vector> ops_; - - private: - /** - * @brief Run the network. - * - * Run all the operators with the `scope`, if no scope is provided, default - * scope will be used instead. If no OpContext is provicded, default context - * will be used. - */ - void RunImpl(const framework::Scope& scope, - const platform::Place& place) const override { - for (auto& op : ops_) { - op->Run(scope, place); - } - } - - bool add_op_done_{false}; - std::set intermediate_outputs_; - - template - static bool Contains(T container, KeyType key) { - return container.find(key) != container.end(); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/net_op_test.cc b/paddle/fluid/operators/net_op_test.cc deleted file mode 100644 index 3b5f57548585398c441fd8038ba8b053c27392cf..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op_test.cc +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#include "paddle/fluid/operators/net_op.h" - -#include - -namespace paddle { -namespace operators { -using Scope = framework::Scope; -using DeviceContext = platform::DeviceContext; - -static int run_cnt = 0; - -class TestOp : public framework::OperatorBase { - public: - using framework::OperatorBase::OperatorBase; - DEFINE_OP_CLONE_METHOD(TestOp); - - private: - void RunImpl(const Scope& scope, - const platform::Place& place) const override { - ++run_cnt; - } -}; - -template -void AssertSameVectorWithoutOrder(const std::vector& expected, - const std::vector& actual) { - ASSERT_EQ(expected.size(), actual.size()); - std::unordered_set expected_set; - for (auto& tmp : expected) { - expected_set.insert(tmp); - } - for (auto& act : actual) { - ASSERT_NE(expected_set.end(), expected_set.find(act)); - } -} - -TEST(OpKernel, all) { - auto net = std::make_shared(); - ASSERT_NE(net, nullptr); - - net->AppendOp(std::unique_ptr( - new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, framework::AttributeMap{}))); - net->AppendOp(std::unique_ptr( - new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"Out", {"z"}}}, framework::AttributeMap{}))); - - net->CompleteAddOp(); - AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, - net->Inputs(NetOp::kAll)); - AssertSameVectorWithoutOrder({"y", "z"}, net->Outputs(NetOp::kAll)); - - auto final_outs = net->OutputVars(false); - - ASSERT_EQ(final_outs.size(), 1UL); - ASSERT_EQ(final_outs[0], "z"); -} - -TEST(NetOp, insert_op) { - NetOp net; - auto op1 = std::unique_ptr( - new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, framework::AttributeMap{})); - net.AppendOp(*op1); - net.InsertOp(0, *op1); - ASSERT_EQ(2UL, net.ops_.size()); - net.InsertOp(2, std::move(op1)); - ASSERT_EQ(3UL, net.ops_.size()); -} - -TEST(NetOp, Clone) { - NetOp net; - net.AppendOp(std::unique_ptr(new framework::NOP{ - "empty", framework::VariableNameMap{}, framework::VariableNameMap{}, - framework::AttributeMap{}})); - net.AppendOp(std::unique_ptr(new framework::NOP{ - "empty2", framework::VariableNameMap{}, framework::VariableNameMap{}, - framework::AttributeMap{}})); - net.CompleteAddOp(true); - auto new_net_op = net.Clone(); - ASSERT_NE(new_net_op, nullptr); - ASSERT_TRUE(new_net_op->IsNetOp()); - auto* new_net = static_cast(new_net_op.get()); - ASSERT_EQ(2UL, new_net->ops_.size()); - ASSERT_EQ(new_net->ops_[0]->Type(), "empty"); - ASSERT_EQ(new_net->ops_[1]->Type(), "empty2"); -} - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/pad_op.h b/paddle/fluid/operators/pad_op.h index a36abe3789574cb64f05001e34d534cf352a60b2..c93c096575a30dd9344894ead4b81acc16930e21 100644 --- a/paddle/fluid/operators/pad_op.h +++ b/paddle/fluid/operators/pad_op.h @@ -14,6 +14,8 @@ limitations under the License. 
*/ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/pool_mkldnn_op.cc b/paddle/fluid/operators/pool_mkldnn_op.cc index c88578570c1acdecaa97dd8b12a702778fef2b7e..63eaaedcd5fc3df17902511dc02b25bf43ccd241 100644 --- a/paddle/fluid/operators/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/pool_mkldnn_op.cc @@ -83,9 +83,11 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory); auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, (void*)input_data); + mkldnn::memory({src_md, mkldnn_engine}, + static_cast(const_cast(input_data))); auto dst_memory = - mkldnn::memory({dst_md, mkldnn_engine}, (void*)output_data); + mkldnn::memory({dst_md, mkldnn_engine}, + static_cast(const_cast(output_data))); auto pool_prim = mkldnn::pooling_forward(*pool_pd, src_memory, dst_memory, *workspace_memory); @@ -195,9 +197,11 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { pool_bwd_desc, mkldnn_engine, *pool_pd); auto diff_src_memory = - mkldnn::memory({diff_src_md, mkldnn_engine}, (void*)in_x_grad_data); + mkldnn::memory({diff_src_md, mkldnn_engine}, + static_cast(const_cast(in_x_grad_data))); auto diff_dst_memory = - mkldnn::memory({diff_dst_md, mkldnn_engine}, (void*)out_grad_data); + mkldnn::memory({diff_dst_md, mkldnn_engine}, + static_cast(const_cast(out_grad_data))); auto bwd_prim = mkldnn::pooling_backward( pool_bwd_pd, diff_dst_memory, *workspace_memory, diff_src_memory); diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index 2fec50ef25e0d2621a87963acdf142d24970329d..a48127ea6983d3d4ea12ec4925f30af233002ef2 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/pool_with_index_op.h b/paddle/fluid/operators/pool_with_index_op.h index 83e7bd138ae25c6d3e09c3d01178d6887205bf98..b55fa76eae34c3179d40f31ed6a57d3ecbbaaccf 100644 --- a/paddle/fluid/operators/pool_with_index_op.h +++ b/paddle/fluid/operators/pool_with_index_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..f9ae01ab5d2972d2a74b36ae6035985d1d874bb6 --- /dev/null +++ b/paddle/fluid/operators/prefetch_op.cc @@ -0,0 +1,115 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include // NOLINT +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/send_recv_util.h" + +namespace paddle { +namespace operators { + +class PrefetchOp : public framework::OperatorBase { + public: + PrefetchOp(const std::string& type, const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { + auto ins = Inputs("X"); + auto outs = Outputs("Out"); + + std::vector epmap = Attr>("epmap"); + + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto& ctx = *pool.Get(place); + + auto client_var_name = Output("RPCClient"); + PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name), + "Can not find variable '%s' in the scope.", + client_var_name); + auto* client_var = scope.FindVar(client_var_name); + detail::RPCClient* rpc_client = client_var->GetMutable(); + + for (size_t i = 0; i < ins.size(); i++) { + if (NeedSend(scope, ins[i])) { + VLOG(3) << "sending " << ins[i] << " to " << epmap[i] << " to get " + << outs[i] << " back"; + rpc_client->AsyncPrefetchVariable(epmap[i], ctx, scope, ins[i], + outs[i]); + } else { + VLOG(3) << "don't send no-initialied variable: " << ins[i]; + } + } + PADDLE_ENFORCE(rpc_client->Wait()); + } +}; + +class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { + public: + PrefetchOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(LoDTensor) Input Id variables to be sent").AsDuplicable(); + AddOutput("RPCClient", + "(RPCClient) The RPC client object which will be" + "initialized at most once."); + AddOutput("Out", + "(LoDTensor) result " + "to be fetched from parameter server") + .AsDuplicable(); + AddAttr>( + "epmap", + "(string vector, default 127.0.0.1:6164)" + "Server endpoints in the order of input variables for mapping") + .SetDefault({"127.0.0.1:6164"}); + AddComment(R"DOC( +Prefetch operator + +This operator will send Ids variables to listen_and_serve op at +the parameter server and fetch result back. 
+)DOC"); + } +}; + +class PrefetchOpVarTypeInference : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + auto out_var_name = op_desc.Output("RPCClient").front(); + auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); + auto var_type = framework::proto::VarType::RAW; + out_var.SetType(var_type); + } +}; + +class PrefetchOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(prefetch, ops::PrefetchOp, + paddle::framework::EmptyGradOpMaker, ops::PrefetchOpMaker, + ops::PrefetchOpVarTypeInference, + ops::PrefetchOpShapeInference); diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index 447b854544b72043ea09c09c134af3a48c305561..8eaa12a4a6cfc09fd4e2c3642bc8825fe2af6d6b 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/prelu_op.h" -#include "paddle/fluid/operators/net_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index c22a55bce263423d5c17fffdb06b7ece02ae26da..058b13eeb872aaa77a88da37db64a6d59fbdd1cf 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -45,7 +45,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { bool flip = ctx->Attrs().Get("flip"); std::vector aspect_ratios_vec; - ExpandAspectRatios(aspect_ratios, flip, aspect_ratios_vec); + ExpandAspectRatios(aspect_ratios, flip, &aspect_ratios_vec); size_t num_priors = aspect_ratios_vec.size() * min_sizes.size(); if (max_sizes.size() > 0) { @@ -73,7 +73,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), - platform::CPUPlace()); + ctx.device_context()); } }; @@ -171,6 +171,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - prior_box, ops::PriorBoxOpKernel, - ops::PriorBoxOpKernel); +REGISTER_OP_CPU_KERNEL(prior_box, ops::PriorBoxOpKernel, + ops::PriorBoxOpKernel); diff --git a/paddle/fluid/operators/prior_box_op.cu b/paddle/fluid/operators/prior_box_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..0ea8909296f8f52d252b0ec258666cf32d69a8bb --- /dev/null +++ b/paddle/fluid/operators/prior_box_op.cu @@ -0,0 +1,167 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/prior_box_op.h" + +namespace paddle { +namespace operators { + +template +__device__ inline T clip(T in) { + return min(max(in, 0.), 1.); +} + +template +__global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, + const int width, const int im_height, + const int im_width, const int as_num, + const T offset, const T step_width, + const T step_height, const T* min_sizes, + const T* max_sizes, const int min_num, + bool is_clip) { + int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num; + int box_num = height * width * num_priors; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; + i += blockDim.x * gridDim.x) { + int h = i / (num_priors * width); + int w = (i / num_priors) % width; + int p = i % num_priors; + int m = max_sizes ? p / (as_num + 1) : p / as_num; + T cx = (w + offset) * step_width; + T cy = (h + offset) * step_height; + T bw, bh; + T min_size = min_sizes[m]; + if (max_sizes) { + int s = p % (as_num + 1); + if (s < as_num) { + T ar = aspect_ratios[s]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } else { + T max_size = max_sizes[m]; + bw = sqrt(min_size * max_size) / 2.; + bh = bw; + } + } else { + int s = p % as_num; + T ar = aspect_ratios[s]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } + T xmin = (cx - bw) / im_width; + T ymin = (cy - bh) / im_height; + T xmax = (cx + bw) / im_width; + T ymax = (cy + bh) / im_height; + out[i * 4] = is_clip ? clip(xmin) : xmin; + out[i * 4 + 1] = is_clip ? clip(ymin) : ymin; + out[i * 4 + 2] = is_clip ? clip(xmax) : xmax; + out[i * 4 + 3] = is_clip ? clip(ymax) : ymax; + } +} + +template +__global__ void SetVariance(T* out, const T* var, const int vnum, + const int num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + out[i] = var[i % vnum]; + } +} + +template +class PriorBoxOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* image = ctx.Input("Image"); + auto* boxes = ctx.Output("Boxes"); + auto* vars = ctx.Output("Variances"); + + auto min_sizes = ctx.Attr>("min_sizes"); + auto max_sizes = ctx.Attr>("max_sizes"); + auto input_aspect_ratio = ctx.Attr>("aspect_ratios"); + auto variances = ctx.Attr>("variances"); + auto flip = ctx.Attr("flip"); + auto clip = ctx.Attr("clip"); + + std::vector aspect_ratios; + ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); + + T step_w = static_cast(ctx.Attr("step_w")); + T step_h = static_cast(ctx.Attr("step_h")); + T offset = static_cast(ctx.Attr("offset")); + + auto im_width = image->dims()[3]; + auto im_height = image->dims()[2]; + + auto width = input->dims()[3]; + auto height = input->dims()[2]; + + T step_width, step_height; + if (step_w == 0 || step_h == 0) { + step_width = static_cast(im_width) / width; + step_height = static_cast(im_height) / height; + } else { + step_width = step_w; + step_height = step_h; + } + + int num_priors = aspect_ratios.size() * min_sizes.size(); + if (max_sizes.size() > 0) { + num_priors += max_sizes.size(); + } + int min_num = static_cast(min_sizes.size()); + int box_num = width * height * num_priors; + + int block = 512; + int grid = (box_num + block - 1) / block; + + auto stream = + ctx.template device_context().stream(); + + boxes->mutable_data(ctx.GetPlace()); + vars->mutable_data(ctx.GetPlace()); + + framework::Tensor r; + 
framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r); + + framework::Tensor min; + framework::TensorFromVector(min_sizes, ctx.device_context(), &min); + + T* max_data = nullptr; + framework::Tensor max; + if (max_sizes.size() > 0) { + framework::TensorFromVector(max_sizes, ctx.device_context(), &max); + max_data = max.data(); + } + + GenPriorBox<<>>( + boxes->data(), r.data(), height, width, im_height, im_width, + aspect_ratios.size(), offset, step_width, step_height, min.data(), + max_data, min_num, clip); + + framework::Tensor v; + framework::TensorFromVector(variances, ctx.device_context(), &v); + grid = (box_num * 4 + block - 1) / block; + SetVariance<<>>(vars->data(), v.data(), + variances.size(), box_num * 4); + } +}; // namespace operators + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel, + ops::PriorBoxOpCUDAKernel); diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h index 18bb2deb6b5acf626dfb2883a5771d9d195d45c0..1c62fd8d2c4d4e4deba4ca6442efbaff83e36c35 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/prior_box_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/transform.h" @@ -22,23 +24,23 @@ namespace operators { inline void ExpandAspectRatios(const std::vector& input_aspect_ratior, bool flip, - std::vector& output_aspect_ratior) { + std::vector* output_aspect_ratior) { constexpr float epsilon = 1e-6; - output_aspect_ratior.clear(); - output_aspect_ratior.push_back(1.0f); + output_aspect_ratior->clear(); + output_aspect_ratior->push_back(1.0f); for (size_t i = 0; i < input_aspect_ratior.size(); ++i) { float ar = input_aspect_ratior[i]; bool already_exist = false; - for (size_t j = 0; j < output_aspect_ratior.size(); ++j) { - if (fabs(ar - output_aspect_ratior[j]) < epsilon) { + for (size_t j = 0; j < output_aspect_ratior->size(); ++j) { + if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) { already_exist = true; break; } } if (!already_exist) { - output_aspect_ratior.push_back(ar); + output_aspect_ratior->push_back(ar); if (flip) { - output_aspect_ratior.push_back(1.0f / ar); + output_aspect_ratior->push_back(1.0f / ar); } } } @@ -51,7 +53,7 @@ struct ClipFunctor { } }; -template +template class PriorBoxOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -68,7 +70,7 @@ class PriorBoxOpKernel : public framework::OpKernel { auto clip = ctx.Attr("clip"); std::vector aspect_ratios; - ExpandAspectRatios(input_aspect_ratio, flip, aspect_ratios); + ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); T step_w = static_cast(ctx.Attr("step_w")); T step_h = static_cast(ctx.Attr("step_h")); @@ -106,49 +108,24 @@ class PriorBoxOpKernel : public framework::OpKernel { int idx = 0; for (size_t s = 0; s < min_sizes.size(); ++s) { auto min_size = min_sizes[s]; - // first prior: aspect_ratio = 1, size = min_size - box_width = box_height = min_size / 2.; - // xmin - e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin - e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax - e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax - e_boxes(h, w, 
idx, 3) = (center_y + box_height) / img_height; - - idx++; - if (max_sizes.size() > 0) { - auto max_size = max_sizes[s]; - // second prior: aspect_ratio = 1, - // size = sqrt(min_size * max_size) - box_width = box_height = sqrt(min_size * max_size) / 2.; - // xmin + // priors with different aspect ratios + for (size_t r = 0; r < aspect_ratios.size(); ++r) { + float ar = aspect_ratios[r]; + box_width = min_size * sqrt(ar) / 2.; + box_height = min_size / sqrt(ar) / 2.; e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; idx++; } - - // rest of priors - for (size_t r = 0; r < aspect_ratios.size(); ++r) { - float ar = aspect_ratios[r]; - if (fabs(ar - 1.) < 1e-6) { - continue; - } - box_width = min_size * sqrt(ar) / 2.; - box_height = min_size / sqrt(ar) / 2.; - // xmin + if (max_sizes.size() > 0) { + auto max_size = max_sizes[s]; + // square prior with size sqrt(minSize * maxSize) + box_width = box_height = sqrt(min_size * max_size) / 2.; e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; idx++; } diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index 767eef56861ea075ec2450b1456e7c5c807ce25d..a1127f11a75e54168ca9682a0189255d37ee8571 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
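Since the prior-box hunks above reorder how priors are emitted per aspect ratio, it helps to see what ExpandAspectRatios actually produces. Below is a standalone rework with plain floats, no Paddle types, under the same rules: ratio 1.0 always comes first, near-duplicates within 1e-6 are dropped, and flip appends the reciprocal of each ratio. The per-location prior count is then aspect_ratios.size() * min_sizes.size(), plus max_sizes.size() when max sizes are given.

#include <cmath>
#include <cstdio>
#include <vector>

// Plain-float rework of ExpandAspectRatios from prior_box_op.h above.
void ExpandAspectRatios(const std::vector<float>& input, bool flip,
                        std::vector<float>* output) {
  constexpr float epsilon = 1e-6f;
  output->clear();
  output->push_back(1.0f);  // the implicit ratio 1.0 always comes first
  for (float ar : input) {
    bool already_exist = false;
    for (float seen : *output) {
      if (std::fabs(ar - seen) < epsilon) {
        already_exist = true;
        break;
      }
    }
    if (!already_exist) {
      output->push_back(ar);
      if (flip) output->push_back(1.0f / ar);  // flipped ratio
    }
  }
}

int main() {
  std::vector<float> expanded;
  ExpandAspectRatios({2.0f, 3.0f}, /*flip=*/true, &expanded);
  for (float ar : expanded) std::printf("%g ", ar);  // 1 2 0.5 3 0.333333
  std::printf("\n");
  // With two min sizes and one max size this yields
  // num_priors = 5 * 2 + 1 = 11 priors per feature-map location.
  return 0;
}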
*/ #include "paddle/fluid/operators/rank_loss_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 2a5605e0d378a184ae132e657b2872279784855d..bf02b9958927580608b95d6b8ecfddc7231a02d4 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -14,6 +14,7 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { @@ -59,17 +60,13 @@ class ReadOp : public framework::OperatorBase { void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { framework::ReaderHolder* reader = - scope.FindVar(Input("Reader"))->GetMutable(); + detail::Ref(scope.FindVar(Input("Reader")), + "Cannot find reader variable %s", Input("Reader")) + .GetMutable(); std::vector out_arg_names = Outputs("Out"); std::vector ins; reader->ReadNext(&ins); - if (ins.empty()) { - reader->ReInit(); - reader->ReadNext(&ins); - PADDLE_ENFORCE( - !ins.empty(), - "Reader can not read the next data even it has been re-initialized."); - } + PADDLE_ENFORCE(!ins.empty(), "There is no next data."); PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); for (size_t i = 0; i < ins.size(); ++i) { auto* out = diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 6fa0195b9ae103418beb56cc4b0fa9ab59e93108..845528860f91d0b479bb3c4dbbe05e32c68dc16f 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -22,5 +22,6 @@ reader_library(create_batch_reader_op SRCS create_batch_reader_op.cc) reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_op.cc) reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) +reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) # Export local libraries to parent set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/create_batch_reader_op.cc b/paddle/fluid/operators/reader/create_batch_reader_op.cc index 277f2856c07b3fec2113486539aec1d9139fae92..04c5872bef4600e30ba572a025cc5f0a5e9839ca 100644 --- a/paddle/fluid/operators/reader/create_batch_reader_op.cc +++ b/paddle/fluid/operators/reader/create_batch_reader_op.cc @@ -39,10 +39,13 @@ class CreateBatchReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) - ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); out->Reset( new BatchReader(underlying_reader.Get(), Attr("batch_size"))); } diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 141a3eb93555c32efabc2465dc6daadf41c9d659..0b7c1d6af714558d35918dac62d92d9e0f86c970 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the 
License. -#include +#include // NOLINT + #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" @@ -20,56 +21,64 @@ namespace paddle { namespace operators { namespace reader { -static constexpr size_t kDoubleBufferSize = 2; +// 'Double buffer' means we shall maintain two batches of input data at the same +// time. So the kCacheSize should be at least 2. +static constexpr size_t kCacheSize = 2; +// There will be two batches out of the channel during training: +// 1. the one waiting to be sent to the channel +// 2. the one just received from the channel, which is also being used by +// subsequent operators. +// So the channel size should be kCacheSize - 2 +static constexpr size_t kChannelSize = 0; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: - struct Item { - Item() : ctx_(nullptr) {} - - std::vector payloads_; - platform::DeviceContext* ctx_; - }; - explicit DoubleBufferReader( ReaderBase* reader, platform::Place target_place = platform::CPUPlace()) : DecoratedReader(reader), place_(target_place) { - for (size_t i = 0; i < kDoubleBufferSize; ++i) { - if (platform::is_gpu_place(place_)) { + cpu_tensor_cache_.resize(kCacheSize); + gpu_tensor_cache_.resize(kCacheSize); #ifdef PADDLE_WITH_CUDA + if (platform::is_gpu_place(place_)) { + for (size_t i = 0; i < kCacheSize; ++i) { ctxs_.emplace_back(new platform::CUDADeviceContext( boost::get(place_))); -#endif } } - - start_thread(); - } - - void start_thread() { - buffer_ = framework::MakeChannel(kDoubleBufferSize); - prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); +#endif + StartPrefetcher(); } void ReadNext(std::vector* out) override; void ReInit() override; - ~DoubleBufferReader() { - buffer_->Close(); - prefetcher_.join(); - delete buffer_; + ~DoubleBufferReader() { EndPrefetcher(); } + + private: + bool HasNext() const; + + void StartPrefetcher() { + channel_ = framework::MakeChannel(kChannelSize); + prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } - bool HasNext() const override; + void EndPrefetcher() { + channel_->Close(); + if (prefetcher_.joinable()) { + prefetcher_.join(); + } + delete channel_; + channel_ = nullptr; + } - private: void PrefetchThreadFunc(); std::thread prefetcher_; - framework::Channel* buffer_; + framework::Channel* channel_; platform::Place place_; + std::vector> cpu_tensor_cache_; + std::vector> gpu_tensor_cache_; std::vector> ctxs_; - mutable Item local_buffer_; }; class CreateDoubleBufferReaderOp : public framework::OperatorBase { @@ -79,14 +88,19 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); auto place_str = Attr("place"); platform::Place place; - if (place_str == "CPU") { + if (place_str == "AUTO") { + place = dev_place; + } else if (place_str == "CPU") { place = platform::CPUPlace(); } else { std::istringstream sin(place_str); @@ -117,76 +131,74 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { enum_range.insert(string::Sprintf("CUDA:%d", i)); } enum_range.insert("CPU"); - AddAttr("place", "The double buffer place, default is CPU") .SetDefault("CPU") +
enum_range.insert("AUTO"); + AddAttr("place", "The double buffer place") + .SetDefault("AUTO") .InEnum({enum_range}); } }; void DoubleBufferReader::ReadNext(std::vector* out) { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } - - if (local_buffer_.payloads_.empty()) { - buffer_->Receive(&local_buffer_); - } - *out = local_buffer_.payloads_; - local_buffer_.payloads_.clear(); - if (local_buffer_.ctx_) { - local_buffer_.ctx_->Wait(); + out->clear(); + if (HasNext()) { + size_t cached_tensor_id; + channel_->Receive(&cached_tensor_id); + if (platform::is_gpu_place(place_)) { + *out = gpu_tensor_cache_[cached_tensor_id]; + ctxs_[cached_tensor_id]->Wait(); + } else { + // CPU place + *out = cpu_tensor_cache_[cached_tensor_id]; + } } } void DoubleBufferReader::ReInit() { reader_->ReInit(); - buffer_->Close(); - prefetcher_.join(); - delete buffer_; - start_thread(); + EndPrefetcher(); + StartPrefetcher(); +} + +bool DoubleBufferReader::HasNext() const { + while (!channel_->IsClosed() && !channel_->CanReceive()) { + } + return channel_->CanReceive(); } void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; - size_t gpu_ctx_offset = 0; - while (reader_->HasNext()) { - Item batch; - reader_->ReadNext(&batch.payloads_); + size_t cached_tensor_id = 0; + while (true) { + auto& cpu_batch = cpu_tensor_cache_[cached_tensor_id]; + reader_->ReadNext(&cpu_batch); + if (cpu_batch.empty()) { + // The underlying reader has no next data. + break; + } if (platform::is_gpu_place(place_)) { - std::vector gpu_batch; - auto& gpu_ctx = this->ctxs_[gpu_ctx_offset++]; - gpu_ctx_offset %= this->ctxs_.size(); - gpu_batch.resize(batch.payloads_.size()); - for (size_t i = 0; i < batch.payloads_.size(); ++i) { - framework::TensorCopy(batch.payloads_[i], place_, *gpu_ctx, - &gpu_batch[i]); - gpu_batch[i].set_lod(batch.payloads_[i].lod()); + auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; + auto* gpu_ctx = ctxs_[cached_tensor_id].get(); + gpu_batch.resize(cpu_batch.size()); + for (size_t i = 0; i < cpu_batch.size(); ++i) { + framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i]); + gpu_batch[i].set_lod(cpu_batch[i].lod()); } - batch.ctx_ = gpu_ctx.get(); - std::swap(gpu_batch, batch.payloads_); } - try { - buffer_->Send(&batch); + size_t tmp = cached_tensor_id; + channel_->Send(&tmp); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The double buffer channel has been closed.
The " "prefetch thread will terminate."; break; } + ++cached_tensor_id; + cached_tensor_id %= kCacheSize; } - buffer_->Close(); + channel_->Close(); VLOG(5) << "Prefetch thread terminates."; } -bool DoubleBufferReader::HasNext() const { - if (local_buffer_.payloads_.empty()) { - bool ok = buffer_->Receive(&local_buffer_); - return ok; - } else { - return true; - } -} - } // namespace reader } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index 4d4e9fb909eafea5328491a4097276577f28a5ba..0573345ba502b6a9af35710840d5acf7634f332f 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -25,22 +25,12 @@ class MultiPassReader : public framework::DecoratedReader { : DecoratedReader(reader), pass_num_(pass_num), pass_count_(0) {} void ReadNext(std::vector* out) override { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } reader_->ReadNext(out); - } - - bool HasNext() const override { - if (reader_->HasNext()) { - return true; - } else { + if (out->empty()) { ++pass_count_; - if (pass_count_ >= pass_num_) { - return false; - } else { + if (pass_count_ < pass_num_) { reader_->ReInit(); - return true; + reader_->ReadNext(out); } } } @@ -62,12 +52,15 @@ class CreateMultiPassReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable(); + if (out->Get() != nullptr) { + return; + } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - auto& out = detail::Ref(scope.FindVar(Output("Out"))); int pass_num = Attr("pass_num"); - out.GetMutable()->Reset( - new MultiPassReader(underlying_reader.Get(), pass_num)); + out->Reset(new MultiPassReader(underlying_reader.Get(), pass_num)); } }; @@ -81,10 +74,10 @@ class CreateMultiPassReaderOpMaker : public DecoratedReaderMakerBase { This operator creates a multi-pass reader. A multi-pass reader is used to yield data for several pass training continuously. - It takes the the number of pass to run as one of its attributes + It takes the number of passes to run as one of its attributes ('pass_num'), and maintains a pass counter to record how many - passes it has completed. When the underlying reader reach the EOF, - the multi-pass reader checks whether it has completed training + passes it has completed. When the underlying reader reaches the + EOF, the multi-pass reader checks whether it has completed training of the given number of pass. If not, the underlying reader will be re-initialized and starts a new pass automatically. 
)DOC"); diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc index 95d8674c08b63e872926ff8708d0c734da33684c..d1cb8e47da70cab784858caea7e791151fc104dd 100644 --- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc +++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc @@ -52,8 +52,6 @@ class RandomDataGenerator : public framework::ReaderBase { void ReInit() override { return; } - bool HasNext() const override { return true; } - private: float min_; float max_; @@ -74,7 +72,7 @@ class CreateRandomDataGeneratorOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); std::vector shapes = RestoreShapes(shape_concat, ranks); diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index c4aa29c7206dbd3fe6a99b2a6c5ac6f083621944..2ae29725561769ebe6428002c9983246b8eec724 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -18,6 +18,7 @@ namespace paddle { namespace operators { namespace reader { +template class RecordIOFileReader : public framework::FileReader { public: explicit RecordIOFileReader(const std::string& filename, @@ -25,18 +26,27 @@ class RecordIOFileReader : public framework::FileReader { : FileReader(dims), scanner_(filename), dev_ctx_(*platform::DeviceContextPool::Instance().Get( - platform::CPUPlace())) {} - - bool HasNext() const override { return scanner_.HasNext(); } + platform::CPUPlace())) { + if (ThreadSafe) { + mutex_.reset(new std::mutex()); + } + LOG(INFO) << "Creating file reader" << filename; + } void ReInit() override { scanner_.Reset(); } protected: void ReadNextImpl(std::vector* out) override { - *out = framework::ReadFromRecordIO(scanner_, dev_ctx_); + if (ThreadSafe) { + std::lock_guard guard(*mutex_); + *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); + } else { + *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); + } } private: + std::unique_ptr mutex_; recordio::Scanner scanner_; const platform::DeviceContext& dev_ctx_; }; @@ -52,15 +62,16 @@ class CreateRecordIOReaderOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); std::string filename = Attr("filename"); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset( - new RecordIOFileReader(filename, RestoreShapes(shape_concat, ranks))); + + out->Reset(new RecordIOFileReader( + filename, RestoreShapes(shape_concat, ranks))); } }; @@ -87,4 +98,4 @@ REGISTER_FILE_READER_OPERATOR(create_recordio_file_reader, reader::CreateRecordIOReaderOp, reader::CreateRecordIOReaderOpMaker); -REGISTER_FILE_READER(recordio, reader::RecordIOFileReader); +REGISTER_FILE_READER(recordio, reader::RecordIOFileReader); diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc 
b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 3a1f3805a0483c2f5eabdc7432556051d8308964..13825d65913be95f4f444bd9d5271a036ec8b1e2 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -30,35 +30,33 @@ class ShuffleReader : public framework::DecoratedReader { std::random_device device; seed_ = device(); } - ReadIntoBuffers(); + ReloadBuffer(); } void ReadNext(std::vector* out) override { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } + out->clear(); if (iteration_pos_ >= buffer_.size()) { VLOG(10) << "Resetting shuffle buffer"; - ReadIntoBuffers(); + ReloadBuffer(); + if (buffer_.empty()) { + return; + } } *out = buffer_[iteration_pos_++]; } - bool HasNext() const override { - return iteration_pos_ < buffer_.size() || reader_->HasNext(); - } - private: - void ReadIntoBuffers() { + void ReloadBuffer() { buffer_.clear(); buffer_.reserve(buffer_size_); iteration_pos_ = 0; for (size_t i = 0; i < buffer_size_; ++i) { - if (!reader_->HasNext()) { + std::vector ins; + reader_->ReadNext(&ins); + if (ins.empty()) { break; } - buffer_.emplace_back(); - reader_->ReadNext(&buffer_.back()); + buffer_.emplace_back(ins); } std::mt19937 g(seed_); std::shuffle(buffer_.begin(), buffer_.end(), g); @@ -80,10 +78,14 @@ class CreateShuffleReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable(); + if (out->Get() != nullptr) { + return; + } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - auto& var = detail::Ref(scope.FindVar(Output("Out"))); - var.GetMutable()->Reset( + out->Reset( new ShuffleReader(underlying_reader.Get(), static_cast(Attr("buffer_size")))); } diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..cbf709d5e734c0f2adf3735dc28043c1340349da --- /dev/null +++ b/paddle/fluid/operators/reader/create_threaded_reader_op.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class ThreadedReader : public framework::DecoratedReader { + public: + ThreadedReader(ReaderBase* reader, bool safe_mode) + : DecoratedReader(reader), safe_mode_(safe_mode) {} + + void ReadNext(std::vector* out) override { + std::lock_guard lock(mutex_); + reader_->ReadNext(out); + } + + void ReInit() override { + if (safe_mode_) { + PADDLE_THROW( + "ThreadedReader::ReInit() is disabled when 'safe_mode' is true."); + } + VLOG(5) << "ThreadedReader::ReInit() is invoked! 
It might be buggy in " + "multi-thread environment."; + reader_->ReInit(); + } + + private: + bool safe_mode_; + std::mutex mutex_; +}; + +class CreateThreadedReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); + bool safe_mode = Attr("safe_mode"); + out->Reset(new ThreadedReader(underlying_reader.Get(), safe_mode)); + } +}; + +class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { + public: + CreateThreadedReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : DecoratedReaderMakerBase(op_proto, op_checker) { + AddAttr("safe_mode", + "When 'safe_mode' is true, 'ReInit()' is disabled to avoid " + "unexpected bugs in multi-thread environment.") + .SetDefault(true); + AddComment(R"DOC( + CreateThreadedReader Operator + + This operator creates a threaded reader. A threaded reader's + 'ReadNext()' can be invoked by several threads at the same + time. + When the attribute 'safe_mode' is true, the threaded reader's + 'ReInit()' is disabled to avoid unexpected bugs in multi-thread + environment. + )DOC"); + } +}; + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace reader = paddle::operators::reader; +REGISTER_DECORATED_READER_OPERATOR(create_threaded_reader, + reader::CreateThreadedReaderOp, + reader::CreateThreadedReaderOpMaker); diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index b6ac7b21d56f7760b3f4814581c90b0ff2cc4a6a..779dc8a6a0deb7792e0540071e3a2588102fa708 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
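One change is shared by all the reader hunks above (double buffer, multi-pass, shuffle, random data, recordio, threaded): HasNext() is gone from the reader interface, and end-of-data is now signalled by ReadNext() leaving *out empty. A minimal sketch of that protocol follows, with toy stand-ins; ToyReader and ToyMultiPass are illustrative types, not fluid's ReaderBase hierarchy, and the wrapper mirrors MultiPassReader::ReadNext above.

#include <cstdio>
#include <vector>

// Toy base reader: an empty *out after ReadNext() means "no more data".
struct ToyReader {
  std::vector<int> data;
  size_t pos;
  void ReadNext(std::vector<int>* out) {
    out->clear();
    if (pos < data.size()) out->push_back(data[pos++]);
  }
  void ReInit() { pos = 0; }
};

// Mirrors MultiPassReader::ReadNext above: on EOF, re-initialize the
// underlying reader and retry until pass_num passes are exhausted.
struct ToyMultiPass {
  ToyReader* reader;
  int pass_num;
  int pass_count;
  void ReadNext(std::vector<int>* out) {
    reader->ReadNext(out);
    if (out->empty()) {
      ++pass_count;
      if (pass_count < pass_num) {
        reader->ReInit();
        reader->ReadNext(out);
      }
    }
  }
};

int main() {
  ToyReader base{{1, 2, 3}, 0};
  ToyMultiPass multi{&base, 2, 0};
  std::vector<int> batch;
  while (true) {
    multi.ReadNext(&batch);
    if (batch.empty()) break;      // EOF after the second pass
    std::printf("%d ", batch[0]);  // prints: 1 2 3 1 2 3
  }
  std::printf("\n");
  return 0;
}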
+#include // NOLINT + #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" @@ -19,22 +21,23 @@ namespace paddle { namespace operators { namespace reader { -class MultipleReader : public framework::ReaderBase { +class MultiFileReader : public framework::ReaderBase { public: - MultipleReader(const std::vector& file_names, - const std::vector& dims, size_t thread_num) - : file_names_(file_names), dims_(dims) { + MultiFileReader(const std::vector& file_names, + const std::vector& dims, size_t thread_num, + size_t buffer_size) + : file_names_(file_names), dims_(dims), buffer_size_(buffer_size) { prefetchers_.resize(thread_num); StartNewScheduler(); } void ReadNext(std::vector* out) override; - bool HasNext() const override; void ReInit() override; - ~MultipleReader() { EndScheduler(); } + ~MultiFileReader() { EndScheduler(); } private: + bool HasNext(); void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -44,40 +47,36 @@ class MultipleReader : public framework::ReaderBase { std::vector dims_; std::thread scheduler_; std::vector prefetchers_; + size_t buffer_size_; framework::Channel* waiting_file_idx_; framework::Channel* available_thread_idx_; framework::Channel>* buffer_; - mutable std::vector local_buffer_; }; -void MultipleReader::ReadNext(std::vector* out) { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } - - if (local_buffer_.empty()) { - buffer_->Receive(&local_buffer_); +void MultiFileReader::ReadNext(std::vector* out) { + out->clear(); + if (HasNext()) { + buffer_->Receive(out); } - *out = local_buffer_; - local_buffer_.clear(); -} - -bool MultipleReader::HasNext() const { - return local_buffer_.empty() ? buffer_->Receive(&local_buffer_) : true; } -void MultipleReader::ReInit() { +void MultiFileReader::ReInit() { EndScheduler(); - local_buffer_.clear(); StartNewScheduler(); } -void MultipleReader::StartNewScheduler() { +bool MultiFileReader::HasNext() { + while (!buffer_->IsClosed() && !buffer_->CanReceive()) { + } + return buffer_->CanReceive(); +} + +void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); waiting_file_idx_ = framework::MakeChannel(file_names_.size()); available_thread_idx_ = framework::MakeChannel(thread_num); buffer_ = - framework::MakeChannel>(thread_num); + framework::MakeChannel>(buffer_size_); for (size_t i = 0; i < file_names_.size(); ++i) { waiting_file_idx_->Send(&i); @@ -90,7 +89,7 @@ void MultipleReader::StartNewScheduler() { scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); } -void MultipleReader::EndScheduler() { +void MultiFileReader::EndScheduler() { available_thread_idx_->Close(); buffer_->Close(); waiting_file_idx_->Close(); @@ -102,8 +101,8 @@ void MultipleReader::EndScheduler() { delete waiting_file_idx_; } -void MultipleReader::ScheduleThreadFunc() { - VLOG(5) << "MultipleReader schedule thread starts."; +void MultiFileReader::ScheduleThreadFunc() { + VLOG(5) << "MultiFileReader schedule thread starts."; size_t completed_thread_num = 0; size_t thread_idx; while (available_thread_idx_->Receive(&thread_idx)) { @@ -135,17 +134,20 @@ void MultipleReader::ScheduleThreadFunc() { p.join(); } } - VLOG(5) << "MultipleReader schedule thread terminates."; + VLOG(5) << "MultiFileReader schedule thread terminates."; } -void MultipleReader::PrefetchThreadFunc(std::string file_name, - size_t thread_idx) { +void MultiFileReader::PrefetchThreadFunc(std::string file_name, + size_t thread_idx) { VLOG(5) << "The prefetch thread of file 
'" << file_name << "' starts."; std::unique_ptr reader = CreateReaderByFileName(file_name, dims_); - while (reader->HasNext()) { + while (true) { std::vector ins; reader->ReadNext(&ins); + if (ins.empty()) { + break; + } try { buffer_->Send(&ins); } catch (paddle::platform::EnforceNotMet e) { @@ -176,17 +178,19 @@ class OpenFilesOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); const auto& file_names = Attr>("file_names"); PADDLE_ENFORCE(!file_names.empty(), "No file to be read!"); const size_t thread_num = Attr("thread_num"); + const size_t buffer_size = Attr("buffer_size"); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new MultipleReader( - file_names, RestoreShapes(shape_concat, ranks), thread_num)); + out->Reset(new MultiFileReader(file_names, + RestoreShapes(shape_concat, ranks), + thread_num, buffer_size)); } }; @@ -197,11 +201,12 @@ class OpenFilesOpMaker : public FileReaderMakerBase { AddAttr>("file_names", "Files to be read."); AddAttr("thread_num", "The maximal concurrent prefetch thread number.") .GreaterThan(0); + AddAttr("buffer_size", "The size of prefetch buffer.").GreaterThan(0); AddComment(R"DOC( OpenFiles Operator - An OpenFilesOp creates a MultipleReader, which is able to + An OpenFilesOp creates a MultiFileReader, which is able to read data multi-threaded from multiple files. )DOC"); } diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index 083c1fae5e2016ada6309aba78bdfa6ad7fef89c..a4dcf704a63ae3bad6567ddb042ea23513bccff7 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -19,7 +20,6 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include #include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 832509641cc3d5178ff090e05437484d395bfe51..93f9c74b809770136d3d3300e0e0700b1bc0459e 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -14,93 +14,72 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/reshape_op.h" +#include +#include + namespace paddle { namespace operators { -class ReshapeOp : public framework::OperatorWithKernel { - public: - ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorWithKernel(type, inputs, outputs, attrs) {} - - void InferShape(framework::InferShapeContext *ctx) const override { - // input check - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ReshapeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ReshapeOp should not be null."); - - auto shape = ctx->Attrs().Get>("shape"); - PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); - auto x_dims = ctx->GetInputDim("X"); - - std::vector neg_dims_idx; - // set some dimension to -1 if it is unknown - const int unknown_size = -1; - for (size_t i = 0; i < shape.size(); ++i) { - PADDLE_ENFORCE(shape[i] > 0 || shape[i] == unknown_size, - "Each dimension of Attr(shape) must be positive or %d.", - unknown_size); - if (shape[i] == unknown_size) { - neg_dims_idx.push_back(i); - PADDLE_ENFORCE(neg_dims_idx.size() <= 1, - "Only one dimension of Attr(shape) can be unknown."); - } - } - - int64_t capacity = - std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); - int64_t in_size = framework::product(x_dims); - if (neg_dims_idx.size() == 1) { - // dim infer - shape[neg_dims_idx[0]] = in_size / (-capacity); - // recalculate capacity - capacity = shape[neg_dims_idx[0]] * (-capacity); - } - // capacity check - PADDLE_ENFORCE(capacity == in_size, - "The size of Input(X) mismatches with Attr(shape)."); - // resize output - std::vector shape_int64(shape.size(), 0); - std::transform(shape.begin(), shape.end(), shape_int64.begin(), - [](int a) { return static_cast(a); }); - auto out_dims = framework::make_ddim(shape_int64); - ctx->SetOutputDim("Out", out_dims); - if (shape[0] == x_dims[0]) { - // Only pass LoD when the first dimension is equal between - // output and input. - ctx->ShareLoD("X", /*->*/ "Out"); - } - } -}; - class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input tensor of reshape operator."); - AddOutput("Out", "The output tensor of reshape operator."); - AddAttr>("shape", - "(vector) " - "Target shape of reshape operator."); + AddInput("X", "(Tensor). The input tensor of reshape operator."); + AddInput("Shape", + "(Tensor, optional). If provided, reshape according to " + "this given shape. That is to say it has a higher priority than " + "the shape attribute, while the shape attribute still should be " + "set correctly to gurantee shape inference in compile time.") + .AsDispensable(); + AddOutput("Out", "(Tensor). The output tensor of reshape operator."); + AddAttr>( + "shape", "(std::vector) Target shape of reshape operator."); AddAttr("inplace", - "Change the source tensor's shape without copy memory.") - .SetDefault(true); + "(default: false) Change the source tensor's shape without " + "memory copy. When Attr(inplace) is set true, the output " + "tensor shares memory with Input(X), otherwise, a new output " + "tensor is created, and its data are copied from Input(x).") + .SetDefault(false); AddComment(R"DOC( Reshape Operator. -Reshape Input(X) into the shape specified by Attr(shape). 
+Reshape Input(X) into the shape specified by Attr(shape) or Input(Shape). The +data in Input(X) are unchanged. -An example: -Given a 2-D tensor X with 2 rows and 2 columns : [[1, 2], [3, 4]] +Examples: -and target shape = [1, 4], the reshape operator will transform -the tensor X into a 2-D tensor: [[1, 2, 3, 4]] +1. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape +specified by Attr(shape) is [6, 8], the reshape operator will transform Input(X) +into a 2-D tensor with shape [6, 8], while leaving Input(X)'s data unchanged. + +2. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape +specified by Attr(shape) is [2, 3, -1, 2], the reshape operator will transform +Input(X) into a 4-D tensor with shape [2, 3, 4, 2], while leaving Input(X)'s data +unchanged. In this case, one and only one dimension of Attr(shape) can be set to -1; +the value of this dimension is inferred from the total element number of +Input(X) and the remaining dimensions. + +3. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape +specified by Attr(shape) is [-1, 0, 3, 2], the reshape operator will transform +Input(X) into a 4-D tensor with shape [2, 4, 3, 2], while leaving Input(X)'s data +unchanged. In this case, besides -1, 0 means the actual dimension value is going +to be copied from the corresponding dimension of Input(X). + +Note: + +1. One and only one dimension in Attr(shape) can be set to -1. In this case, +the actual dimension value will be inferred from the total element number of +Input(X) and the remaining dimensions. + +2. More than one dimension in Attr(shape) can be set to 0, which means the real +dimension value will be copied from Input(X) at runtime. Note that the index of +0 cannot exceed Rank(X). For example, if Input(X) is a 3-D tensor with shape +[2, 3, 4], Attr(shape) = [2, 3, 2, 0] is an invalid input. + +3. Input(Shape) has a higher priority than Attr(shape) if it is provided, while +Attr(shape) still should be set correctly to guarantee shape inference in +compile-time. -One dimension in the target shape can be set -1, representing that its -size is unknown. In this case, the real dimension will be infered from -the original shape of Input(X) and other dimensions in the target shape. )DOC"); } }; @@ -119,6 +98,14 @@ class ReshapeGradOp : public framework::OperatorWithKernel { "Input(Out@GRAD) shouldn't be null."); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } }; } // namespace operators diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index eacb0a0cf21a60ffbdef5787434859ac549388bc..8320c257c9ab15efec29eabe99eca5b6f74c9e31 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -14,23 +14,141 @@ limitations under the License.
*/ #pragma once +#include +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { +class ReshapeOp : public framework::OperatorWithKernel { + public: + ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ReshapeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ReshapeOp should not be null."); + + const std::vector &shape = ctx->Attrs().Get>("shape"); + PADDLE_ENFORCE(!shape.empty(), + "The shape information must be set by Attr(shape)."); + + if (ctx->HasInput("Shape") && ctx->IsRuntime()) { + // If true, set the shape of Output(Out) according to Input(Shape) in + // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel. + ctx->ShareLoD("X", /*->*/ "Out"); + return; + } + + auto x_dims = ctx->GetInputDim("X"); + auto out_dims = ValidateShape(shape, x_dims); + ctx->SetOutputDim("Out", out_dims); + if (x_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same. + ctx->ShareLoD("X", /*->*/ "Out"); + } + } + + static framework::DDim ValidateShape(const std::vector shape, + const framework::DDim &in_dims) { + const int64_t in_size = framework::product(in_dims); + // only one dimension can be set to -1, whose size will be automatically + // inferred. + const int64_t unk_dim_val = -1; + const int64_t copy_dim_val = 0; + + std::vector output_shape(shape.size(), 0); + int64_t capacity = 1; + int unk_dim_idx = -1; + for (size_t i = 0; i < shape.size(); ++i) { + if (shape[i] == unk_dim_val) { + PADDLE_ENFORCE( + unk_dim_idx == -1, + "Only one input dimension of Attr(shape) can be unknown."); + unk_dim_idx = i; + } else if (shape[i] == copy_dim_val) { + PADDLE_ENFORCE( + static_cast(i) < in_dims.size(), + "The index of dimension to copy from input shape must be less " + "than the size of input shape."); + } else { + PADDLE_ENFORCE( + shape[i] > 0, + "Each input dimension of Attr(shape) must not be negative except " + "one unknown dimension."); + } + + capacity *= (shape[i] ? shape[i] : in_dims[i]); + output_shape[i] = + (shape[i] ?
static_cast(shape[i]) : in_dims[i]); + } + + if (unk_dim_idx != -1) { + output_shape[unk_dim_idx] = -in_size / capacity; + PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, + "Invalid shape is given."); + } else { + PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); + } + return framework::make_ddim(output_shape); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; + template class ReshapeKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const { - auto* out = ctx.Output("Out"); - auto* in = ctx.Input("X"); + void Compute(const framework::ExecutionContext &ctx) const { + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); + auto *shape_tensor = ctx.Input("Shape"); + + framework::DDim out_dims = out->dims(); + + if (shape_tensor) { + auto *shape_data = shape_tensor->data(); + framework::Tensor cpu_shape_tensor; + if (platform::is_gpu_place(ctx.GetPlace())) { + TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), + &cpu_shape_tensor); + shape_data = cpu_shape_tensor.data(); + ctx.device_context().Wait(); + } + auto shape = + std::vector(shape_data, shape_data + shape_tensor->numel()); + out_dims = ReshapeOp::ValidateShape(shape, in->dims()); + } + if (!in->lod().empty()) { + PADDLE_ENFORCE_EQ( + out_dims[0], in->dims()[0], + "Reshape operator cannot reshape an input sequence batch " + "into an output sequence batch that has a different " + "number of time steps. Please consider using " + "sequence_reshape op."); + } + bool inplace = ctx.Attr("inplace"); - auto out_dims = out->dims(); + out->Resize(out_dims); if (!inplace) { out->mutable_data(ctx.GetPlace()); framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); + ctx.device_context().Wait(); + // TensorCopy will resize to in_dims. out->Resize(out_dims); } else { out->ShareDataWith(*in); @@ -42,15 +160,17 @@ class ReshapeKernel : public framework::OpKernel { template class ReshapeGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const { - auto* d_out = ctx.Input(framework::GradVarName("Out")); - auto* d_x = ctx.Output(framework::GradVarName("X")); + void Compute(const framework::ExecutionContext &ctx) const { + auto *d_out = ctx.Input(framework::GradVarName("Out")); + auto *d_x = ctx.Output(framework::GradVarName("X")); + d_x->mutable_data(ctx.GetPlace()); bool inplace = ctx.Attr("inplace"); auto in_dims = d_x->dims(); if (!inplace) { framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); + ctx.device_context().Wait(); d_x->Resize(in_dims); } else { d_x->ShareDataWith(*d_out); diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index f38c5a3c0c9952b37f7db468ea00470a00b5ff6f..54e07490319cf1da749bd33449a7b51efd6c3d65 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
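For reference, a standalone sketch of the -1/0 resolution that ValidateShape above implements, using plain integers instead of framework::DDim; Resolve is a made-up name. It reproduces example 3 from the reshape DOC: 0 copies the matching input dimension, and the single allowed -1 absorbs whatever element count remains.

#include <cassert>
#include <cstdio>
#include <vector>

std::vector<long long> Resolve(const std::vector<int>& shape,
                               const std::vector<long long>& in_dims) {
  long long in_size = 1;
  for (long long d : in_dims) in_size *= d;
  long long capacity = 1;
  int unk_idx = -1;
  std::vector<long long> out(shape.size(), 0);
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      assert(unk_idx == -1);  // only one -1 is allowed
      unk_idx = static_cast<int>(i);
      continue;
    }
    out[i] = shape[i] == 0 ? in_dims[i] : shape[i];  // 0 copies an input dim
    capacity *= out[i];
  }
  if (unk_idx >= 0) {
    out[unk_idx] = in_size / capacity;  // -1 is inferred from the remainder
    assert(out[unk_idx] * capacity == in_size);
  } else {
    assert(capacity == in_size);  // sizes must match exactly
  }
  return out;
}

int main() {
  // Input [2, 4, 6] with target shape [-1, 0, 3, 2] resolves to [2, 4, 3, 2].
  for (long long d : Resolve({-1, 0, 3, 2}, {2, 4, 6}))
    std::printf("%lld ", d);  // prints: 2 4 3 2
  std::printf("\n");
  return 0;
}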
*/ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index b16d06df8d0f7f57a5ec2f2be9a2cbb12a8ba55d..1e938638c9182972a2ae2436166ff0aa49efd4be 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/scale_op.h" -#include "paddle/fluid/operators/net_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu index ef7d700659d8d713715a10910baf739954ba0786..a70b9091727935ddcbb83dd5775729969f7d64e5 100644 --- a/paddle/fluid/operators/scatter_op.cu +++ b/paddle/fluid/operators/scatter_op.cu @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "gather.cu.h" +#include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/gather_op.h" -#include "scatter.cu.h" +#include "paddle/fluid/operators/scatter.cu.h" +#include "paddle/fluid/operators/scatter_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h index 2151d8a9240fc88966533f4a07d5cf56b6c1c3bc..d29947b55e751a3e7993f765198364f4debe2472 100644 --- a/paddle/fluid/operators/scatter_op.h +++ b/paddle/fluid/operators/scatter_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "gather.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "scatter.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/operators/scatter.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_test.cc b/paddle/fluid/operators/scatter_test.cc index b67af3c3710eafc57b660a48e4c340d5eefe7e5b..750245153a7df6c4a7ce088038005dcab1685b5f 100644 --- a/paddle/fluid/operators/scatter_test.cc +++ b/paddle/fluid/operators/scatter_test.cc @@ -13,44 +13,48 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/scatter.h" -#include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/place.h" - #include #include #include +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" TEST(scatter, ScatterUpdate) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators; + // using namespace paddle::framework; + // using namespace paddle::platform; + // using namespace paddle::operators; - Tensor* src = new Tensor(); - Tensor* index = new Tensor(); - Tensor* output = new Tensor(); + paddle::framework::Tensor* src = new paddle::framework::Tensor(); + paddle::framework::Tensor* index = new paddle::framework::Tensor(); + paddle::framework::Tensor* output = new paddle::framework::Tensor(); float* p_src = nullptr; int* p_index = nullptr; - p_src = src->mutable_data(make_ddim({1, 4}), CPUPlace()); - p_index = index->mutable_data(make_ddim({1}), CPUPlace()); + p_src = src->mutable_data(paddle::framework::make_ddim({1, 4}), + paddle::platform::CPUPlace()); + p_index = index->mutable_data(paddle::framework::make_ddim({1}), + paddle::platform::CPUPlace()); - for (size_t i = 0; i < 4; ++i) p_src[i] = float(i); + for (size_t i = 0; i < 4; ++i) p_src[i] = static_cast(i); p_index[0] = 1; - float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace()); + float* p_output = output->mutable_data( + paddle::framework::make_ddim({4, 4}), paddle::platform::CPUPlace()); auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); - ScatterAssign(ctx, *src, *index, output); + paddle::operators::ScatterAssign(ctx, *src, *index, output); - for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0)); - for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0)); - for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], float(i - 4)); + for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], 0.0f); + for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], 0.0f); + for (size_t i = 4; i < 8; ++i) { + EXPECT_EQ(p_output[i], static_cast(i - 4)); + } for (size_t i = 4; i < 8; ++i) - EXPECT_EQ(output->data()[i], float(i - 4)); - for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], float(0)); - for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], float(0)); + EXPECT_EQ(output->data()[i], static_cast(i - 4)); + for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], 0.0f); + for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], 0.0f); delete src; delete index; diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index 8d02a6f29177536562e38372eb0336424aa0a47c..12b844daaa33162b86b7daffa2e4c49785701662 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -19,7 +20,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include #include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index fdf3c06ef0a7c2daa7c484375065ac2110e07478..82ff087d0a7a4b482aef842e618f593b17dca171 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -12,35 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" - -#include #include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/send_recv_util.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { -static bool NeedSend(const framework::Scope& scope, - const std::string& varname) { - auto* var = scope.FindVar(varname); - PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.", - varname); - if (var->IsType()) { - return var->Get().IsInitialized(); - } else if (var->IsType()) { - return var->Get().rows().size() > 0UL; - } else { - PADDLE_THROW( - "Variable type in send side should be in " - "[LodTensor, SelectedRows]"); - } - return false; -} class SendOp : public framework::OperatorBase { public: @@ -72,7 +56,7 @@ class SendOp : public framework::OperatorBase { for (size_t i = 0; i < ins.size(); i++) { if (NeedSend(scope, ins[i])) { - VLOG(2) << "sending " << ins[i] << " to " << epmap[i]; + VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]); } else { VLOG(3) << "don't send no-initialied variable: " << ins[i]; @@ -81,7 +65,7 @@ class SendOp : public framework::OperatorBase { PADDLE_ENFORCE(rpc_client->Wait()); for (auto& ep : endpoints) { - VLOG(2) << "batch barrier, ep: " << ep; + VLOG(3) << "batch barrier, ep: " << ep; rpc_client->AsyncSendBatchBarrier(ep); } PADDLE_ENFORCE(rpc_client->Wait()); diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index e9fb845b475ff5776bf948ab120a44c16ed87aa0..3bf5d57809019d3ae469471c2ee2e7aac70b9faf 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -14,12 +14,13 @@ limitations under the License. */ #include #include -#include +#include // NOLINT #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/operators/listen_and_serv_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/string/printf.h" @@ -34,12 +35,13 @@ namespace m = paddle::operators::math; // global for simplicity. 
std::unique_ptr listen_and_serv_op; +int selected_port; -void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { +void InitTensorsInScope(const p::CPUPlace &place, f::Scope *scope) { p::CPUDeviceContext ctx(place); for (int i = 0; i < 2; ++i) { auto var_name = paddle::string::Sprintf("x%d", i); - auto var = scope.Var(var_name); + auto var = scope->Var(var_name); auto tensor = var->GetMutable(); tensor->Resize({10, 10}); float *expect = tensor->mutable_data(place); @@ -48,20 +50,20 @@ void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { } } - auto out_var = scope.Var("Out"); + auto out_var = scope->Var("Out"); auto out_tensor = out_var->GetMutable(); out_tensor->Resize({10, 10}); out_tensor->mutable_data(place); // allocate } -void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { +void InitSelectedRowsInScope(const p::CPUPlace &place, f::Scope *scope) { p::CPUDeviceContext ctx(place); int64_t height = 10; int64_t row_numel = 10; m::SetConstant set_one; // init x0 std::vector rows0{0, 4, 7}; - auto x0_var = scope.Var("x0"); + auto x0_var = scope->Var("x0"); auto x0 = x0_var->GetMutable(); x0->set_rows(rows0); x0->set_height(height); @@ -72,7 +74,7 @@ void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { // init x1 std::vector rows1{2, 9}; - auto x1_var = scope.Var("x1"); + auto x1_var = scope->Var("x1"); auto x1 = x1_var->GetMutable(); x1->set_rows(rows1); x1->set_height(height); @@ -81,7 +83,7 @@ void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { f::make_ddim({static_cast(rows1.size()), row_numel}), place); set_one(ctx, x1_value, 1.0); - auto out_var = scope.Var("Out"); + auto out_var = scope->Var("Out"); auto out = out_var->GetMutable(); auto out_value = out->mutable_value(); out->set_height(height); @@ -115,26 +117,31 @@ void StartServerNet(bool is_sparse) { f::Scope scope; p::CPUPlace place; if (is_sparse) { - InitSelectedRowsInScope(scope, place); + InitSelectedRowsInScope(place, &scope); } else { - InitTensorsInScope(scope, place); + InitTensorsInScope(place, &scope); } // sub program run in listen_and_serv_op, for simple test we use sum f::ProgramDesc program; - f::BlockDesc *optimize_block = program.MutableBlock(0); + const auto &root_block = program.Block(0); + auto *optimize_block = program.AppendBlock(root_block); + auto *prefetch_block = program.AppendBlock(root_block); // X for server side tensors, RX for received tensers, must be of same shape. 
AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); f::AttributeMap attrs; - attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + attrs.insert({"endpoint", std::string("127.0.0.1:0")}); attrs.insert({"Fanin", 1}); attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); + attrs.insert({"PrefetchBlock", prefetch_block}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); + LOG(INFO) << "selected port before run " << selected_port; listen_and_serv_op->Run(scope, place); + LOG(INFO) << "server exit"; } TEST(SendRecvOp, CPUDense) { @@ -143,17 +150,24 @@ TEST(SendRecvOp, CPUDense) { // local net f::Scope scope; p::CPUPlace place; - InitTensorsInScope(scope, place); + InitTensorsInScope(place, &scope); // create rpc client var scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); - attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); + selected_port = static_cast( + listen_and_serv_op.get()) + ->GetSelectedPort(); + LOG(INFO) << "selected port " << selected_port; + std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); + attrs.insert({"endpoints", std::vector({endpoint})}); + attrs.insert({"epmap", std::vector({endpoint})}); auto send_op = f::OpRegistry::CreateOp( "send", {{"X", {"x1"}}}, {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); + LOG(INFO) << "before run " << endpoint; send_op->Run(scope, place); + LOG(INFO) << "end run"; auto in_var = scope.Var("x1"); auto tensor = in_var->GetMutable(); @@ -166,6 +180,7 @@ TEST(SendRecvOp, CPUDense) { for (int64_t i = 0; i < target->numel(); ++i) { EXPECT_EQ(expected[i] * 2, actual[i]); } + LOG(INFO) << "before stop"; listen_and_serv_op->Stop(); server_thread.join(); listen_and_serv_op.reset(nullptr); @@ -178,11 +193,16 @@ TEST(SendRecvOp, CPUSparse) { f::Scope scope; p::CPUPlace place; p::CPUDeviceContext ctx(place); - InitSelectedRowsInScope(scope, place); + InitSelectedRowsInScope(place, &scope); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); - attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); + selected_port = static_cast( + listen_and_serv_op.get()) + ->GetSelectedPort(); + LOG(INFO) << "selected port " << selected_port; + std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); + attrs.insert({"endpoints", std::vector({endpoint})}); + attrs.insert({"epmap", std::vector({endpoint})}); auto send_op = f::OpRegistry::CreateOp( "send", {{"X", {"x1"}}}, {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); diff --git a/paddle/fluid/operators/send_recv_util.h b/paddle/fluid/operators/send_recv_util.h new file mode 100644 index 0000000000000000000000000000000000000000..113513eb6b327773ab4a1c062fb8a3f06fddfbca --- /dev/null +++ b/paddle/fluid/operators/send_recv_util.h @@ -0,0 +1,39 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include + +namespace paddle { +namespace operators { + +inline bool NeedSend(const framework::Scope& scope, + const std::string& varname) { + auto* var = scope.FindVar(varname); + PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.", + varname); + if (var->IsType()) { + return var->Get().IsInitialized(); + } else if (var->IsType()) { + return var->Get().rows().size() > 0UL; + } else { + PADDLE_THROW( + "Variable type in send side should be in " + "[LodTensor, SelectedRows]"); + } + return false; +} + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/send_vars_op.cc b/paddle/fluid/operators/send_vars_op.cc index 523e9e27808e428acb7900fe90a29de80f316bfb..56b3713d6af28d0787e114a672a503e86cbd85fd 100644 --- a/paddle/fluid/operators/send_vars_op.cc +++ b/paddle/fluid/operators/send_vars_op.cc @@ -12,34 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" - -#include #include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/send_recv_util.h" namespace paddle { namespace operators { -static bool NeedSend(const framework::Scope& scope, - const std::string& varname) { - auto* var = scope.FindVar(varname); - PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.", - varname); - if (var->IsType()) { - return var->Get().IsInitialized(); - } else if (var->IsType()) { - return var->Get().rows().size() > 0UL; - } else { - PADDLE_THROW( - "Variable type in send side should be in " - "[LodTensor, SelectedRows]"); - } - return false; -} class SendVarsOp : public framework::OperatorBase { public: @@ -53,7 +36,7 @@ class SendVarsOp : public framework::OperatorBase { auto ins = Inputs("X"); std::vector epmap = Attr>("epmap"); - int sync_send = Attr("sync_sent"); + int sync_send = Attr("sync_send"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); @@ -95,7 +78,7 @@ Send operator This operator will send variables to listen_and_serve op at the parameter server. )DOC"); - AddAttr("ync_send", + AddAttr("sync_send", "(int, default 0)" "sync send or async send.") .SetDefault(0); diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h index 9f04c4199130de3cead6f23ef111453ca752c0e3..71c9f45287c29628a2f2c8c649e9e5270317ef6a 100644 --- a/paddle/fluid/operators/sequence_concat_op.h +++ b/paddle/fluid/operators/sequence_concat_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h index ee48339c52e348e7b3060bbdd462177375aee9f5..b59504bb9893b720247841bdad5aa577992b7fb6 100644 --- a/paddle/fluid/operators/sequence_conv_op.h +++ b/paddle/fluid/operators/sequence_conv_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/context_project.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc index 32b9d7f7c1528a365cd21122e4df0e3c1407a49e..73c0e89512972cda002bd902ee0c78b4b77d8502 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_erase_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_erase_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_erase_op.h index b490c34f543875f73e0862c08c25bcb57611e2f4..265390528a15aa060900276f98128d754fc907fe 100644 --- a/paddle/fluid/operators/sequence_erase_op.h +++ b/paddle/fluid/operators/sequence_erase_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc index 786fe63e7580ce16b946d5049a490eed2c3c6ced..ae52849162ae4d78cc69ddbb98f58059f55683cb 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_expand_op.cc @@ -84,12 +84,11 @@ class SequenceExpandOp : public framework::OperatorWithKernel { } } out_dims[0] = out_first_dim; - ctx->SetOutputDim("Out", out_dims); } else { out_dims[0] = -1; - ctx->SetOutputDim("Out", out_dims); - ctx->ShareLoD("X", /*->*/ "Out"); } + ctx->SetOutputDim("Out", out_dims); + ctx->ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu index bb51bb2902eea797de3449fcb6c8b52b4f0e7fbf..c00765e5d59af068e5682b39ebace5f3d7a62250 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_expand_op.cu @@ -12,8 +12,135 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#define EIGEN_USE_GPU +#include #include "paddle/fluid/operators/sequence_expand_op.h" +#include "paddle/fluid/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; + +template +__global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod, + const size_t* ref_lod, + const size_t* offset, + const size_t lod_size, + /* default=1, + the instance length*/ + const int x_item_length, T* out_data) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + + int x_item_count = x_lod[bid + 1] - x_lod[bid]; + int repeats = ref_lod[bid + 1] - ref_lod[bid]; + int out_offset = static_cast(offset[bid]); + int x_offset = x_lod[bid]; + for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { + for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { + for (int tid_x = threadIdx.x; tid_x < x_item_length; + tid_x += blockDim.x) { + out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + + tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x]; + } + } + } +} + +template +__global__ void sequence_expand_grad_kernel( + const T* dout_data, const size_t* ref_lod, const size_t* dx_lod, + const size_t* offset, const size_t lod_size, + /* default=1, + the instance length*/ + const int x_item_length, T* dx_data) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + int x_item_count = dx_lod[bid + 1] - dx_lod[bid]; + int repeats = ref_lod[bid + 1] - ref_lod[bid]; + int out_offset = static_cast(offset[bid]); + int x_offset = dx_lod[bid]; + + for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { + for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { + for (int tid_x = threadIdx.x; tid_x < x_item_length; + tid_x += blockDim.x) { + platform::CudaAtomicAdd( + &dx_data[(x_offset + tid_y) * x_item_length + tid_x], + dout_data[(out_offset + tid_z * x_item_count + tid_y) * + x_item_length + + tid_x]); + } + } + } +} + +void GetOutputOffset(const framework::Vector& x_lod, + const framework::Vector& ref_lod, + framework::Vector* out_offset) { + size_t offset = 0; + int lod_size = static_cast(x_lod.size()); + for (int i = 0; i < static_cast(x_lod.size()); ++i) { + (*out_offset)[i] = offset; + if (i < lod_size - 1) { + offset += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]); + } + } +} + +template +struct SequenceExpandFunctor { + void operator()( + const platform::CUDADeviceContext& context, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out) { + int x_item_length = x.numel() / x.dims()[0]; + framework::Vector out_offset(x_lod.size()); + GetOutputOffset(x_lod, ref_lod, &out_offset); + + int thread_x = std::min(32, std::max(static_cast(ref_lod.size()), 16)); + int thread_y = 16; + int thread_z = 1024 / thread_x / thread_y; + int block_x = static_cast(ref_lod.size()); + dim3 block_size(thread_x, thread_y, thread_z); + dim3 grid_size(block_x, 1); + + sequence_expand_kernel<<>>( + x.data(), x_lod.CUDAData(context.GetPlace()), + ref_lod.CUDAData(context.GetPlace()), + out_offset.CUDAData(context.GetPlace()), x_lod.size(), x_item_length, + out->mutable_data(context.GetPlace())); + } +}; + +template +struct SequenceExpandGradFunctor { + void operator()(const platform::CUDADeviceContext& context, + const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand based lod*/ + LoDTensor* 
dx) { + int x_item_length = framework::product(dx->dims()) / dx->dims()[0]; + framework::Vector out_offset(x_lod.size()); + GetOutputOffset(x_lod, ref_lod, &out_offset); + + int thread_x = std::min(32, std::max(static_cast(ref_lod.size()), 16)); + int thread_y = 16; + int thread_z = 1024 / thread_x / thread_y; + int block_x = static_cast(ref_lod.size()); + dim3 block_size(thread_x, thread_y, thread_z); + dim3 grid_size(block_x, 1); + sequence_expand_grad_kernel<<>>( + dout.data(), ref_lod.CUDAData(context.GetPlace()), + x_lod.CUDAData(context.GetPlace()), + out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length, + dx->mutable_data(context.GetPlace())); + } +}; + +} // namespace operators +} // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h index db7d8bd6821fabd9714a160970558291ec47197f..d62c387c3eebf9df0ab532f4e891da006f239468 100644 --- a/paddle/fluid/operators/sequence_expand_op.h +++ b/paddle/fluid/operators/sequence_expand_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include // std::iota #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" @@ -26,6 +27,57 @@ template using EigenMatrix = framework::EigenMatrix; +template +struct SequenceExpandFunctor { + void operator()( + const DeviceContext& ctx, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out); +}; + +template +struct SequenceExpandGradFunctor { + void operator()( + const DeviceContext& ctx, const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* dx); +}; + +template +struct SequenceExpandFunctor { + void operator()( + const platform::CPUDeviceContext& context, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out) { + int out_offset = 0; + auto& eigen_place = *context.eigen_device(); + for (size_t i = 1; i < ref_lod.size(); ++i) { + int repeat_num = ref_lod[i] - ref_lod[i - 1]; + int x_start = x_lod[i - 1]; + int x_end = x_lod[i]; + int x_seq_len = x_end - x_start; + if (repeat_num > 0) { + auto x_sub_tensor = x.Slice(x_start, x_end); + x_sub_tensor.Resize({1, x_sub_tensor.numel()}); + int out_start = out_offset; + if (out->lod().size() == 1) { + out_start = out->lod()[0][out_offset]; + } + auto out_sub_tensor = + out->Slice(out_start, out_start + x_seq_len * repeat_num); + out_sub_tensor.Resize({repeat_num, x_sub_tensor.dims()[1]}); + EigenMatrix::From(out_sub_tensor).device(eigen_place) = + EigenMatrix::From(x_sub_tensor) + .broadcast(Eigen::array({{repeat_num, 1}})); + } + out_offset += repeat_num; + } + } +}; + template class SequenceExpandKernel : public framework::OpKernel { public: @@ -47,45 +99,36 @@ class SequenceExpandKernel : public framework::OpKernel { return; } - auto& out_lod = *out->mutable_lod(); + // x lod level is at most 1. 
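// Editor's note, illustrative only (not part of the patch): a worked example of the
// lod expansion computed just below. With y_lod[ref_level] = {0, 2, 5} and
// x_lod[0] = {0, 3, 4}, the first x segment (rows 0..2) is repeated twice and the
// second segment (row 3) three times, so out_lod becomes {0, 3, 6, 7, 8, 9} and the
// expanded output holds 9 rows.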
+ framework::Vector out_lod; if (x_lod.size() == 1) { - out_lod.resize(1); - out_lod[0] = {0}; - } - - int out_offset = 0; - auto& eigen_place = - *context.template device_context().eigen_device(); - for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { - int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; - int x_start = i - 1; - int x_end = i; - if (x_lod.size() == 1) { - x_start = x_lod[0][i - 1]; - x_end = x_lod[0][i]; - } - int x_seq_len = x_end - x_start; - if (repeat_num > 0) { - auto x_sub_tensor = x->Slice(x_start, x_end); - x_sub_tensor.Resize({1, x_sub_tensor.numel()}); - int out_start = out_offset; - if (x_lod.size() == 1) { - out_start = out_lod[0][out_offset]; - } - auto out_sub_tensor = - out->Slice(out_start, out_start + x_seq_len * repeat_num); - out_sub_tensor.Resize({repeat_num, x_sub_tensor.dims()[1]}); - EigenMatrix::From(out_sub_tensor).device(eigen_place) = - EigenMatrix::From(x_sub_tensor) - .broadcast(Eigen::array({{repeat_num, 1}})); - } - for (int j = 0; j < repeat_num; ++j) { - if (x_lod.size() == 1) { - out_lod[0].push_back(out_lod[0].back() + x_seq_len); + out_lod.push_back(0); + int out_offset = 0; + for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { + int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; + int x_start = x_lod[0][i - 1]; + int x_end = x_lod[0][i]; + int x_seq_len = x_end - x_start; + for (int j = 0; j < repeat_num; ++j) { + out_lod.push_back(out_lod.back() + x_seq_len); + out_offset++; } - out_offset++; } + // write lod to out if x has lod + auto& ref_lod = *out->mutable_lod(); + ref_lod[0] = out_lod; } + framework::Vector ref_x_lod; + if (x->lod().size() == 1) { + ref_x_lod = x->lod()[0]; + } else { + // x_lod doesn't has lod, use fake x lod, level = 0 + ref_x_lod.resize(x->dims()[0] + 1); + std::iota(ref_x_lod.begin(), ref_x_lod.end(), 0); + } + SequenceExpandFunctor functor; + functor(context.template device_context(), *x, ref_x_lod, + y_lod[ref_level], out); } }; @@ -101,6 +144,36 @@ class SequenceExpandKernel : public framework::OpKernel { * Grad(X).lod = Input(X).lod * * */ +template +struct SequenceExpandGradFunctor { + void operator()( + const platform::CPUDeviceContext& context, const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* dx) { + math::SetConstant set_zero; + set_zero(context, dx, static_cast(0)); + + int dout_offset = 0; + for (size_t i = 1; i < ref_lod.size(); ++i) { + int repeat_num = ref_lod[i] - ref_lod[i - 1]; + if (repeat_num > 0) { + int x_start = x_lod[i - 1]; + int x_end = x_lod[i]; + int x_seq_len = x_end - x_start; + auto dx_sub = dx->Slice(x_start, x_end); + dx_sub.Resize(flatten_to_1d(dx_sub.dims())); + int dout_end = dout_offset + repeat_num * x_seq_len; + auto dout_sub = dout.Slice(dout_offset, dout_end); + dout_sub.Resize({repeat_num, dx_sub.dims()[0]}); + math::ColwiseSum col_sum; + col_sum(context, dout_sub, &dx_sub); + dout_offset += repeat_num * x_seq_len; + } + } + } +}; + template class SequenceExpandGradKernel : public framework::OpKernel { public: @@ -114,43 +187,26 @@ class SequenceExpandGradKernel : public framework::OpKernel { g_x->mutable_data(context.GetPlace()); g_x->set_lod(x->lod()); - auto& x_lod = x->lod(); auto& y_lod = y->lod(); - if (ref_level == -1) ref_level = y_lod.size() - 1; - // just copy the gradient if (y_lod[ref_level].size() <= 1) { framework::TensorCopy(*g_out, context.GetPlace(), g_x); return; } - auto& dev_ctx = context.template device_context(); - 
- math::SetConstant set_zero; - set_zero(dev_ctx, g_x, static_cast(0)); - - int g_out_offset = 0; - for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { - int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; - if (repeat_num > 0) { - int x_start = i - 1; - int x_end = i; - if (x_lod.size() == 1) { - x_start = x_lod[0][i - 1]; - x_end = x_lod[0][i]; - } - int x_seq_len = x_end - x_start; - auto g_x_sub = g_x->Slice(x_start, x_end); - g_x_sub.Resize(flatten_to_1d(g_x_sub.dims())); - int g_out_end = g_out_offset + repeat_num * x_seq_len; - auto g_out_sub = g_out->Slice(g_out_offset, g_out_end); - g_out_sub.Resize({repeat_num, g_x_sub.dims()[0]}); - math::ColwiseSum col_sum; - col_sum(dev_ctx, g_out_sub, &g_x_sub); - g_out_offset += repeat_num * x_seq_len; - } + framework::Vector ref_x_lod; + framework::Vector ref_lod = y_lod[ref_level]; + if (x->lod().size() == 1) { + ref_x_lod = x->lod()[0]; + } else { + // x_lod doesn't has lod, use fake x lod, level = 0 + ref_x_lod.resize(x->dims()[0] + 1); + std::iota(ref_x_lod.begin(), ref_x_lod.end(), 0); } + SequenceExpandGradFunctor functor; + functor(context.template device_context(), *g_out, ref_x_lod, + ref_lod, g_x); } }; diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 3d4d54a3a3f292d34b1a7645a0db4bdd3208ba6d..933c8c26239d49221819a583f999389ed6fb6cb6 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_pool_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h index 8706ff14aa20714e77d5625fc1f6287ee9b4a8a6..2aa20792f24305a106c500a3d7a6e3d363bc31d8 100644 --- a/paddle/fluid/operators/sequence_pool_op.h +++ b/paddle/fluid/operators/sequence_pool_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" @@ -23,12 +24,6 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template -using EigenVector = framework::EigenVector; -template -using EigenMatrix = framework::EigenMatrix; template class SequencePoolKernel : public framework::OpKernel { @@ -37,11 +32,13 @@ class SequencePoolKernel : public framework::OpKernel { auto* in = context.Input("X"); auto* out = context.Output("Out"); std::string pooltype = context.Attr("pooltype"); + Tensor* index = nullptr; + if (pooltype == "MAX") { + index = context.Output("MaxIndex"); + } auto dims = in->dims(); auto lod = in->lod(); - int64_t w = in->numel() / dims[0]; - // InferShape by lod PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); PADDLE_ENFORCE_GE( @@ -50,45 +47,14 @@ class SequencePoolKernel : public framework::OpKernel { "The first dimension of Input(X) must be large than batch size."); dims[0] = lod[0].size() - 1; out->Resize({dims}); - - auto lod_level_0 = lod[0]; - out->mutable_data(context.GetPlace()); - auto& dev_ctx = context.template device_context(); if (pooltype == "MAX") { - math::MaxSeqPoolFunctor max_pool; - auto* index = context.Output("MaxIndex"); index->Resize({dims}); index->mutable_data(context.GetPlace()); - max_pool(dev_ctx, *in, out, index); - return; - } - - auto& place = - *context.template device_context().eigen_device(); - for (int i = 0; i < static_cast(lod_level_0.size()) - 1; ++i) { - Tensor in_t = in->Slice(static_cast(lod_level_0[i]), - static_cast(lod_level_0[i + 1])); - Tensor out_t = out->Slice(i, i + 1); - int64_t h = static_cast(lod_level_0[i + 1] - lod_level_0[i]); - auto in_e = EigenMatrix::From(in_t, framework::make_ddim({h, w})); - auto out_e = EigenVector::Flatten(out_t); - - if (pooltype == "AVERAGE") { - out_e.device(place) = in_e.mean(Eigen::array({{0}})); - } else if (pooltype == "SUM") { - out_e.device(place) = in_e.sum(Eigen::array({{0}})); - } else if (pooltype == "SQRT") { - out_e.device(place) = in_e.sum(Eigen::array({{0}})) / - std::sqrt(static_cast(h)); - } else if (pooltype == "LAST") { - out_e.device(place) = in_e.chip(h - 1, 0); - } else if (pooltype == "FIRST") { - out_e.device(place) = in_e.chip(0, 0); - } else { - PADDLE_THROW("unsupported pooling pooltype"); - } } + math::SequencePoolFunctor pool; + pool(context.template device_context(), pooltype, *in, out, + index); } }; @@ -96,58 +62,17 @@ template class SequencePoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* in = context.Input("X"); auto* out_g = context.Input(framework::GradVarName("Out")); auto* in_g = context.Output(framework::GradVarName("X")); std::string pooltype = context.Attr("pooltype"); - - auto dims = in->dims(); - auto lod = in->lod()[0]; - int64_t w = in->numel() / dims[0]; - - in_g->mutable_data(context.GetPlace()); - auto& dev_ctx = context.template device_context(); - + const Tensor* index = nullptr; if (pooltype == "MAX") { - math::MaxSeqPoolGradFunctor max_pool_grad; - auto* index = context.Input("MaxIndex"); - max_pool_grad(dev_ctx, *out_g, *index, in_g); - return; - } - - if (pooltype == "LAST" || pooltype == "FIRST") { - // set X@Grad be zero at first when pooltype is LAST/FIRST - math::SetConstant functor; - functor(dev_ctx, in_g, 0); - } - auto& place = - *context.template 
device_context().eigen_device(); - - for (int i = 0; i < static_cast(lod.size()) - 1; ++i) { - auto in_g_t = - in_g->Slice(static_cast(lod[i]), static_cast(lod[i + 1])); - auto out_g_t = out_g->Slice(i, i + 1); - int64_t h = static_cast(lod[i + 1] - lod[i]); - auto in_g_e = EigenMatrix::From(in_g_t, {h, w}); - auto out_g_e = EigenMatrix::From(out_g_t, {1, w}); - auto out_g_e_v = EigenVector::Flatten(out_g_t); - Eigen::DSizes bcast(h, 1); - - if (pooltype == "AVERAGE") { - in_g_e.device(place) = (out_g_e / static_cast(h)).broadcast(bcast); - } else if (pooltype == "SUM") { - in_g_e.device(place) = (out_g_e).broadcast(bcast); - } else if (pooltype == "SQRT") { - in_g_e.device(place) = - (out_g_e / std::sqrt(static_cast(h))).broadcast(bcast); - } else if (pooltype == "LAST") { - in_g_e.chip(h - 1, 0).device(place) = out_g_e_v; - } else if (pooltype == "FIRST") { - in_g_e.chip(0, 0).device(place) = out_g_e_v; - } else { - PADDLE_THROW("unsupported pooling pooltype"); - } + index = context.Input("MaxIndex"); } + in_g->mutable_data(context.GetPlace()); + math::SequencePoolGradFunctor pool; + pool(context.template device_context(), pooltype, *out_g, + in_g, index); } }; diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index e8b4df04286d327f568f4c43886f9fcf89cc4a88..d2c1317bef95deca36f7f4198407f5350a1be035 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_softmax_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index d0aa2f9cbadaadf4e7e625628d9db5677d50d277..06cb0550ad7d4ad0241a4f439ea9ac16d9714c38 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -35,17 +35,16 @@ class SGDOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, "Learning rate should have 1 element"); auto param_dim = ctx->GetInputDim("Param"); - // TODO(qijun): check dimensions of Param and Grad at complie - // and run time. + // TODO(qijun): check dimensions of Param and Grad at compile + // and runtime. 
ctx->SetOutputDim("ParamOut", param_dim); } protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("Param")->type()), - ctx.GetPlace()); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("Param")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; @@ -53,10 +52,12 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Param", "(Tensor) Input parameter"); + AddInput("Param", "(Tensor or SelectedRows) Input parameter"); AddInput("LearningRate", "(Tensor) Learning rate of SGD"); - AddInput("Grad", "(Tensor) Input gradient"); - AddOutput("ParamOut", "(Tensor) Output parameter"); + AddInput("Grad", "(Tensor or SelectedRows) Input gradient"); + AddOutput("ParamOut", + "(Tensor or SelectedRows, same with Param) " + "Output parameter, should share the same memory with Param"); AddComment(R"DOC( SGD operator diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index 0ad801079400f1830d85a945e57a434a86adeb00..cfc8793e1e05a7d4fa9207ae77a664b391b9a986 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -23,60 +23,98 @@ namespace operators { template class SGDOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto* param = ctx.Input("Param"); - auto* param_out = ctx.Output("ParamOut"); - auto* learning_rate = ctx.Input("LearningRate"); - - auto* grad_var = ctx.InputVar("Grad"); - // Actually, all tensors are LoDTensor except SelectedRows. - if (grad_var->IsType()) { - param_out->mutable_data(ctx.GetPlace()); - auto* grad = ctx.Input("Grad"); - - auto p = framework::EigenVector::Flatten(*param); - auto g = framework::EigenVector::Flatten(*grad); - auto o = framework::EigenVector::Flatten(*param_out); - auto* lr = learning_rate->data(); - - o = p - lr[0] * g; - } else if (grad_var->IsType()) { - // TODO(qijun): In Sparse SGD operator, in-place update is enforced. - // This manual optimization brings difficulty to track data dependency. - // It's better to find a more elegant solution. - PADDLE_ENFORCE_EQ(param, param_out); - auto* grad = ctx.Input("Grad"); + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *learning_rate = ctx.Input("LearningRate"); + + const auto *param_var = ctx.InputVar("Param"); + const auto *grad_var = ctx.InputVar("Grad"); + + if (param_var->IsType()) { + const auto *param = ctx.Input("Param"); + auto *param_out = ctx.Output("ParamOut"); + + // Actually, all tensors are LoDTensor except SelectedRows. + if (grad_var->IsType()) { + param_out->mutable_data(ctx.GetPlace()); + const auto *grad = ctx.Input("Grad"); + + auto p = framework::EigenVector::Flatten(*param); + auto g = framework::EigenVector::Flatten(*grad); + auto o = framework::EigenVector::Flatten(*param_out); + auto *lr = learning_rate->data(); + + o = p - lr[0] * g; + } else if (grad_var->IsType()) { + // TODO(qijun): In Sparse SGD operator, in-place update is enforced. + // This manual optimization brings difficulty to track data dependency. + // It's better to find a more elegant solution. + PADDLE_ENFORCE_EQ(param, param_out); + const auto *grad = ctx.Input("Grad"); + + // for distributed training, a sparse var may be empty, + // just skip updating. 
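// Editor's note, illustrative only (not part of the patch): the row-wise update below
// touches only the rows named by the sparse gradient. For a dense param of shape
// [4, 2], lr = 0.1, and a SelectedRows grad with rows = {0, 3} and
// value = [[1, 1], [2, 2]], row 0 becomes row0 - 0.1 * [1, 1] and row 3 becomes
// row3 - 0.1 * [2, 2]; rows 1 and 2 are left untouched.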
+ if (grad->rows().size() == 0) { + return; + } + + auto grad_height = grad->height(); + auto out_dims = param_out->dims(); + PADDLE_ENFORCE_EQ(grad_height, out_dims[0]); + + auto &grad_value = grad->value(); + auto &grad_rows = grad->rows(); + + size_t grad_row_numel = grad_value.numel() / grad_rows.size(); + PADDLE_ENFORCE_EQ(static_cast(grad_row_numel), + param_out->numel() / grad_height); + + auto *grad_data = grad_value.data(); + auto *out_data = param_out->data(); + auto *lr = learning_rate->data(); + for (size_t i = 0; i < grad_rows.size(); i++) { + PADDLE_ENFORCE(grad_rows[i] < grad_height, + "Input rows index should less than height"); + for (size_t j = 0; j < grad_row_numel; j++) { + out_data[grad_rows[i] * grad_row_numel + j] -= + lr[0] * grad_data[i * grad_row_numel + j]; + } + } + } else { + PADDLE_THROW("Unsupported Variable Type of Grad"); + } + } else if (param_var->IsType()) { + PADDLE_ENFORCE(grad_var->IsType(), + "when param " + "is SelectedRows, gradient should also be SelectedRows"); + const auto ¶m = param_var->Get(); + auto *param_out = ctx.Output("ParamOut"); + const auto &grad = grad_var->Get(); // for distributed training, a sparse var may be empty, // just skip updating. - if (grad->rows().size() == 0) { + if (grad.rows().size() == 0) { return; } - auto in_height = grad->height(); - auto out_dims = param_out->dims(); - PADDLE_ENFORCE_EQ(in_height, out_dims[0]); - - auto& in_value = grad->value(); - auto& in_rows = grad->rows(); + size_t param_row_width = param.value().numel() / param.rows().size(); + size_t grad_row_width = grad.value().numel() / grad.rows().size(); + PADDLE_ENFORCE_EQ(param_row_width, grad_row_width, + "param_row should have the same size with grad_row"); - int64_t in_row_numel = in_value.numel() / in_rows.size(); - PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); - - auto* in_data = in_value.data(); - auto* out_data = param_out->data(); - auto* lr = learning_rate->data(); - for (size_t i = 0; i < in_rows.size(); i++) { - PADDLE_ENFORCE(in_rows[i] < in_height, + const auto *lr = learning_rate->data(); + const auto *grad_data = grad.value().data(); + auto *out_data = param_out->mutable_value()->data(); + for (size_t i = 0; i < grad.rows().size(); i++) { + PADDLE_ENFORCE(grad.rows()[i] < grad.height(), "Input rows index should less than height"); - for (int64_t j = 0; j < in_row_numel; j++) { - out_data[in_rows[i] * in_row_numel + j] -= - lr[0] * in_data[i * in_row_numel + j]; + int64_t id_index = param.index(grad.rows()[i]); + for (size_t j = 0; j < grad_row_width; j++) { + out_data[id_index * grad_row_width + j] -= + lr[0] * grad_data[i * grad_row_width + j]; } } - } else { - PADDLE_THROW("Unsupported Variable Type of Grad"); + PADDLE_THROW("Unsupported Variable Type of Parameter"); } } }; diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index cf0244e8662e827a90d8472a097315680579ff6d..dc2f1763446b2aaf72b20c72e8e37ec920abd120 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -12,12 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include #include "mkldnn.hpp" #include "paddle/fluid/operators/softmax_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" -#include - namespace paddle { namespace operators { @@ -63,9 +62,11 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { softmax_md, 1 /*dim: C*/); // create memory primitives auto softmax_src_memory = - memory({softmax_md, mkldnn_engine}, (void*)input_data); + memory({softmax_md, mkldnn_engine}, + static_cast(const_cast(input_data))); auto softmax_dst_memory = - memory({softmax_md, mkldnn_engine}, (void*)output_data); + memory({softmax_md, mkldnn_engine}, + static_cast(const_cast(output_data))); auto softmax_prim_desc = softmax_forward::primitive_desc(softmax_desc, mkldnn_engine); auto softmax = softmax_forward(softmax_prim_desc, softmax_src_memory, diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index e2c0f915d96b7746191572fa27b725d90cb6e2e5..6bdefc0f23910c90f3878d8f2634ca6e03c6f736 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/softmax_op.h" + +#include + #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif @@ -20,6 +23,7 @@ limitations under the License. */ #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif + namespace paddle { namespace operators { @@ -60,8 +64,8 @@ class SoftmaxOp : public framework::OperatorWithKernel { auto input_data_type = framework::ToDataType(ctx.Input("X")->type()); if (input_data_type == framework::proto::VarType::FP16) { - PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN, - "float16 can only be used when CUDNN is used"); + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "float16 can only be used on GPU place"); } std::string data_format = ctx.Attr("data_format"); @@ -70,6 +74,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { library_); } }; + class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc index dbd13fd38a33d4068a5b5d47cd92a81293f6e748..0c1f7cef7ab7b66358d80f6f0670e0d07536128c 100644 --- a/paddle/fluid/operators/softmax_op.cu.cc +++ b/paddle/fluid/operators/softmax_op.cu.cc @@ -13,11 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/softmax_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; - -REGISTER_OP_CUDA_KERNEL( - softmax, ops::SoftmaxKernel); +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( - softmax_grad, - ops::SoftmaxGradKernel); + softmax, ops::SoftmaxKernel, + ops::SoftmaxKernel); +REGISTER_OP_CUDA_KERNEL(softmax_grad, + ops::SoftmaxGradKernel); diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/split_ids_op.cc index a54f8a2878c8606e6b487552324d1e7dfa94b9b8..a53cbc8ac5199061dafdc7f4cf560b9e4fc577ab 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/split_ids_op.cc @@ -48,11 +48,11 @@ class SplitIdsOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutputs("Out"), "SplitIdsOp must has output Out."); auto ids_var_type = ctx->GetInputsVarType("Ids").front(); - PADDLE_ENFORCE_EQ(ids_var_type, framework::proto::VarType::LOD_TENSOR); - auto ids_dims = ctx->GetInputDim("Ids"); - PADDLE_ENFORCE_EQ(ids_dims.size(), 2); - PADDLE_ENFORCE_EQ(ids_dims[1], 1); + if (ids_var_type == framework::proto::VarType::LOD_TENSOR) { + PADDLE_ENFORCE_EQ(ids_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[1], 1); + } } }; @@ -60,8 +60,9 @@ class SplitIdsOpInferVarType : public framework::VarTypeInference { public: void operator()(const framework::OpDesc &op_desc, framework::BlockDesc *block) const override { + auto *input_var = block->Var(op_desc.Input("Ids")[0]); for (auto &out_var : op_desc.Output("Out")) { - block->Var(out_var)->SetType(framework::proto::VarType::LOD_TENSOR); + block->Var(out_var)->SetType(input_var->GetType()); } } }; @@ -73,4 +74,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(split_ids, ops::SplitIdsOp, ops::SplitIdsOpMaker, ops::SplitIdsOpInferVarType); REGISTER_OP_CPU_KERNEL( - split_ids, ops::SplitIdsOpKernel); + split_ids, ops::SplitIdsOpKernel, + ops::SplitIdsOpKernel); diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/split_ids_op.h index 3e750ed2d171876ce2d3c232f5d34234217b3c3e..d263426e073d95ad6d584c7370baf596587a993d 100644 --- a/paddle/fluid/operators/split_ids_op.h +++ b/paddle/fluid/operators/split_ids_op.h @@ -24,38 +24,65 @@ namespace operators { template class SplitIdsOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext &ctx) const override { auto place = ctx.GetPlace(); if (!platform::is_cpu_place(place)) { PADDLE_THROW("SplitIds do not support GPU kernel"); } - const auto* ids_t = ctx.Input("Ids"); - auto& ids_dims = ids_t->dims(); - auto outs = ctx.MultiOutput("Out"); + const auto *ids_var = ctx.InputVar("Ids"); + if (ids_var->IsType()) { + const auto &ids_dims = ctx.Input("Ids")->dims(); + const T *ids = ctx.Input("Ids")->data(); + auto outs = ctx.MultiOutput("Out"); + const size_t shard_num = outs.size(); - const T* ids = ids_t->data(); + std::vector> out_ids; + out_ids.resize(outs.size()); - const size_t shard_num = outs.size(); - - std::vector> out_ids; - out_ids.resize(outs.size()); + // split id by their shard_num. + for (int i = 0; i < ids_dims[0]; ++i) { + T id = ids[i]; + size_t shard_id = static_cast(id) % shard_num; + out_ids[shard_id].push_back(id); + } - // split id by their shard_num. 
- for (size_t i = 0; i < ids_dims[0]; ++i) { - T id = ids[i]; - size_t shard_id = static_cast(id) % shard_num; - out_ids[shard_id].push_back(id); - } + // create tensor for each shard and send to parameter server + for (size_t i = 0; i < out_ids.size(); ++i) { + auto *shard_t = outs[i]; + std::vector ids = out_ids[i]; + auto *shard_data = shard_t->mutable_data( + framework::make_ddim({static_cast(ids.size()), 1}), place); + for (size_t i = 0; i < ids.size(); ++i) { + shard_data[i] = ids[i]; + } + } + } else if (ids_var->IsType()) { + const auto *ids_selected_rows = ctx.Input("Ids"); + auto &ids_dims = ids_selected_rows->value().dims(); + PADDLE_ENFORCE_EQ(ids_dims[0], + static_cast(ids_selected_rows->rows().size()), + ""); + const T *ids = ids_selected_rows->value().data(); + const auto &ids_rows = ids_selected_rows->rows(); + auto outs = ctx.MultiOutput("Out"); + const size_t shard_num = outs.size(); + // get rows for outputs + for (auto &id : ids_rows) { + size_t shard_id = static_cast(id) % shard_num; + outs[shard_id]->mutable_rows()->push_back(id); + } - // create tensor for each shard and send to parameter server - for (size_t i = 0; i < out_ids.size(); ++i) { - auto* shard_t = outs[i]; - std::vector ids = out_ids[i]; - auto* shard_data = shard_t->mutable_data( - framework::make_ddim({static_cast(ids.size()), 1}), place); - for (size_t i = 0; i < ids.size(); ++i) { - shard_data[i] = ids[i]; + int64_t row_width = ids_dims[1]; + for (auto &out : outs) { + out->set_height(ids_selected_rows->height()); + framework::DDim ddim = framework::make_ddim( + {static_cast(out->rows().size()), row_width}); + T *output = out->mutable_value()->mutable_data(ddim, place); + for (int64_t i = 0; i < ddim[0]; ++i) { + memcpy(output + i * row_width, ids + out->rows()[i] * row_width, + row_width * sizeof(T)); + } } } } diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index dffac772f11bee2fa3dcdf469a86adc57369b54d..e745509ec8c1f2ec305d7d4aabfdd43d847124b5 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/split_op.h" -#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index ae8562c0c503fec13dff61e04845ba0832848f5f..e2c41f44ab3ea3c42837974dae749278c9356ba5 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include +#include // NOLINT #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc index f1c4415f27d54ad09e5cb3659bd16abd82e38215..8c55b4ebbc88f696e99b1194055bed3b0d0b3f0b 100644 --- a/paddle/fluid/operators/spp_op.cc +++ b/paddle/fluid/operators/spp_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/spp_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h index 3d2f22632570fe2a28a822370a400390c78b533a..08cb7849d20443862b66ea6096c095b294c7242c 100644 --- a/paddle/fluid/operators/spp_op.h +++ b/paddle/fluid/operators/spp_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/pooling.h" diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 22c1db82e9f5aff6aa9a311cd1093b33fa7e6db9..7a10218e1556698f3e0a1828db5de8851dd1c90b 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -37,8 +37,8 @@ inline void StridedMemcpy(const platform::DeviceContext& dev_ctx, const T* src, const framework::DDim& src_stride, const framework::DDim& dst_dim, const framework::DDim& dst_stride, T* dst) { - using namespace detail; - StridedCopyDimVisitor func(dev_ctx, src, src_stride, dst_stride, dst); + paddle::operators::detail::StridedCopyDimVisitor func( + dev_ctx, src, src_stride, dst_stride, dst); boost::apply_visitor(func, dst_dim); } diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index d3d5c8a3429e2070c5472355b4440401eaa699cb..108f26fafe7af76eaa613d77ed77748ee43ea234 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -10,7 +10,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sum_op.h" + +#include +#include #include + #include "paddle/fluid/framework/var_type_inference.h" #include "paddle/fluid/operators/detail/safe_ref.h" @@ -35,7 +39,10 @@ class SumOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputsDim("X"); size_t N = x_dims.size(); - PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); + PADDLE_ENFORCE_GT(N, 0, "Input tensors count should > 0."); + if (N == 1) { + VLOG(3) << "Warning: sum have only one input, may waste memory"; + } framework::DDim in_dim({0}); for (auto& x_dim : x_dims) { diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index e7e5346cdca5efaf81c2b0fddedde7406e3b874d..49a4afb3a8a19c97e844e66477c6288772ece807 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu index bfd26c2f2294f954adc81a1719650c46372098c4..d7f4d383ce0d9e1ff42fc12c96aaf0ceb532e5db 100644 --- a/paddle/fluid/operators/top_k_op.cu +++ b/paddle/fluid/operators/top_k_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/assert.h" namespace paddle { @@ -133,71 +134,71 @@ __device__ __forceinline__ void GetTopK(Pair topk[], const T* val, int* col, } template -__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int& beam, +__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int* beam, int beam_size, const T* src, - bool& firstStep, bool& is_empty, - Pair& max, int dim, + bool* firstStep, bool* is_empty, + Pair* max, int dim, const int tid) { - if (beam > 0) { - int length = beam < beam_size ? beam : beam_size; - if (firstStep) { - firstStep = false; + if (*beam > 0) { + int length = (*beam) < beam_size ? *beam : beam_size; + if (*firstStep) { + *firstStep = false; GetTopK(topk, src, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { - if (k < MaxLength - beam) { - topk[k] = topk[k + beam]; + if (k < MaxLength - (*beam)) { + topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } - if (!is_empty) { - GetTopK(topk + MaxLength - beam, src, tid, dim, max, + if (!(*is_empty)) { + GetTopK(topk + MaxLength - *beam, src, tid, dim, *max, length); } } - max = topk[MaxLength - 1]; - if (max.v == -1) is_empty = true; - beam = 0; + *max = topk[MaxLength - 1]; + if ((*max).v == -1) *is_empty = true; + *beam = 0; } } template -__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int& beam, +__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int* beam, int beam_size, const T* val, - int* col, bool& firstStep, - bool& is_empty, Pair& max, + int* col, bool* firstStep, + bool* is_empty, Pair* max, int dim, const int tid) { - if (beam > 0) { - int length = beam < beam_size ? beam : beam_size; - if (firstStep) { - firstStep = false; + if (*beam > 0) { + int length = (*beam) < beam_size ? 
diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h
index 42828b7e6564d7da91d608d63fbc0615ef6c4f97..9f8482adedb4c29e32d4109941a2752d942ae49f 100644
--- a/paddle/fluid/operators/top_k_op.h
+++ b/paddle/fluid/operators/top_k_op.h
@@ -15,6 +15,8 @@ limitations under the License. */
 #pragma once
 #include <algorithm>
 #include <iostream>
+#include <utility>
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index 87b1f530e08df7022d112b26e28511a982052126..4aea9cd65bed615c84c95d891a0a4092678e1444 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/transpose_op.h"
+#include <vector>
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h
index 90f16499a6f52514bfed3dbeb4176ccc956b23d7..895d1ce2cca19c0c1e4aa03cc64eb1425e8bab1a 100644
--- a/paddle/fluid/operators/transpose_op.h
+++ b/paddle/fluid/operators/transpose_op.h
@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once
+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index 87699362b2b5a14750a01345098ec5e6cc9be115..acaefaacdaa593c090d81084fdc1b3665314833f 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -24,7 +24,19 @@ template <typename T>
 class CPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* tensor = ctx.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor = nullptr;
+    auto out_var = ctx.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = out_var->GetMutable<framework::LoDTensor>();
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = ctx.Attr<std::vector<int>>("shape");
+      tensor = out_var->GetMutable<framework::SelectedRows>()->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW(
+          "uniform_random_op's output only"
+          "supports SelectedRows and Tensor");
+    }
     T* data = tensor->mutable_data<T>(ctx.GetPlace());
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
     std::minstd_rand engine;
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index 1232cd1eb332441b12e59a34b2c2f75669925fd0..e1c7323a30233f4ec4f60e46aa6088ee6d8601b7 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -43,7 +43,19 @@ template <typename T>
 class GPUUniformRandomKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* tensor = context.Output<framework::Tensor>("Out");
+    framework::Tensor* tensor = nullptr;
+    auto out_var = context.OutputVar("Out");
+    if (out_var->IsType<framework::LoDTensor>()) {
+      tensor = out_var->GetMutable<framework::LoDTensor>();
+    } else if (out_var->IsType<framework::SelectedRows>()) {
+      auto shape = context.Attr<std::vector<int>>("shape");
+      tensor = out_var->GetMutable<framework::SelectedRows>()->mutable_value();
+      tensor->Resize(framework::make_ddim(shape));
+    } else {
+      PADDLE_THROW(
+          "uniform_random_op's output only"
+          "supports SelectedRows and Tensor");
+    }
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     if (seed == 0) {
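
Both uniform_random kernels above now dispatch on the runtime type of the output variable instead of assuming a plain tensor. A self-contained sketch of that dispatch, using `std::variant` purely as an illustration; `DenseOut`/`SparseOut` are hypothetical stand-ins for `LoDTensor`/`SelectedRows`, and the real op inspects a `Variable`, not a variant:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

struct DenseOut { std::string shape; };
struct SparseOut { std::string value_shape; };
using OutVar = std::variant<DenseOut, SparseOut>;

void FillUniform(OutVar* out, const std::string& attr_shape) {
  if (auto* t = std::get_if<DenseOut>(out)) {
    std::cout << "fill tensor of shape " << t->shape << "\n";
  } else if (auto* s = std::get_if<SparseOut>(out)) {
    // Mirrors tensor->Resize(make_ddim(shape)) on the SelectedRows value.
    s->value_shape = attr_shape;
    std::cout << "fill selected-rows value of shape " << s->value_shape << "\n";
  } else {
    throw std::runtime_error(
        "uniform_random_op's output only supports SelectedRows and Tensor");
  }
}

int main() {
  OutVar dense = DenseOut{"[4, 8]"};
  OutVar sparse = SparseOut{};
  FillUniform(&dense, "[4, 8]");
  FillUniform(&sparse, "[4, 8]");
}
```
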
diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc
index 0ca7ea00fafc5cf7ab240e1e41710d3b791dfbfb..31859fd1d70dc6e6387258cd5f7412e78a302567 100644
--- a/paddle/fluid/operators/unpool_op.cc
+++ b/paddle/fluid/operators/unpool_op.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/unpool_op.h"
+#include <string>
+#include <vector>
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/unpool_op.h b/paddle/fluid/operators/unpool_op.h
index a4421045756bd39728fc14c06efd11a56c7e55af..96abad3de9b959ee611355c67f1fa9e56c430b1b 100644
--- a/paddle/fluid/operators/unpool_op.h
+++ b/paddle/fluid/operators/unpool_op.h
@@ -14,6 +14,8 @@ limitations under the License. */
 #pragma once
+#include <string>
+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/unpooling.h"
diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h
index 3e3e3089315ab9365925c38b9bce5fb0120d37c3..afbfe69973830bde93ec0af8d1c844580a786663 100644
--- a/paddle/fluid/operators/warpctc_op.h
+++ b/paddle/fluid/operators/warpctc_op.h
@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once
+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/sequence_padding.h"
diff --git a/paddle/fluid/platform/.clang-format b/paddle/fluid/platform/.clang-format
deleted file mode 100644
index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000
--- a/paddle/fluid/platform/.clang-format
+++ /dev/null
@@ -1,5 +0,0 @@
----
-Language: Cpp
-BasedOnStyle: Google
-Standard: Cpp11
-...
diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index 686c0889140f0050b37192542ca98e2f3e5f23df..917bdc64abf608b8ade70c47f76a8adffb32046a 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -6,8 +6,8 @@ add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
 add_dependencies(profiler_py_proto profiler_py_proto_init)
 
 add_custom_command(TARGET profiler_py_proto POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/profiler
-    COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/profiler
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/profiler
+    COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/profiler
     COMMENT "Copy generated python proto into directory paddle/fluid/proto/profiler."
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
@@ -42,12 +42,12 @@ ENDIF()
 
 # memcpy depends on device_context, here add deps individually for
 # avoiding cycle dependencies
-cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator
-    system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS})
+cc_library(device_context SRCS device_context.cc DEPS malloc
+    place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS})
 nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info)
 
 nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
-nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
+nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context)
 
 cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS})
 cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
diff --git a/paddle/fluid/platform/call_once.h b/paddle/fluid/platform/call_once.h
deleted file mode 100644
index fa34972c38d6e7f77a7e178d68592f9886748fa1..0000000000000000000000000000000000000000
--- a/paddle/fluid/platform/call_once.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <mutex>
-
-namespace paddle {
-namespace platform {
-
-/*
-  The current implementation of std::call_once has a bug described in
-  https://stackoverflow.com/questions/41717579/stdcall-once-hangs-on-second-call-after-callable-threw-on-first-call.
-  This is likely caused by a deeper bug of pthread_once, which is discussed in
-  https://patchwork.ozlabs.org/patch/482350/
-
-  This wrap is a hack to avoid this bug.
-*/
-template <typename Callable, typename... Args>
-inline void call_once(std::once_flag& flag, Callable&& f, Args&&... args) {
-  bool good = true;
-  std::exception ex;
-  try {
-    std::call_once(flag,
-                   [&](Args&&... args) {
-                     try {
-                       f(args...);
-                     } catch (const std::exception& e) {
-                       ex = e;
-                       good = false;
-                     } catch (...) {
-                       ex = std::runtime_error("excption caught in call_once");
-                       good = false;
-                     }
-                   },
-                   args...);
-  } catch (std::system_error& x) {
-    throw std::runtime_error("call once failed");
-  }
-  if (!good) {
-    throw std::exception(ex);
-  }
-}
-
-}  // namespace platform
-}  // namespace paddle
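
With the hand-rolled `platform::call_once` wrapper deleted above, the dynload code (below) relies on `std::call_once` directly, passing a lambda that does the one-time work. A minimal sketch of that pattern, assuming standard-conforming `std::call_once` semantics: if the lambda throws, the flag stays unset and a later call retries, which is the behaviour the old wrapper tried to emulate by hand. `dso_handle`/`EnsureLoaded` are illustrative names only.

```cpp
#include <iostream>
#include <mutex>

std::once_flag dso_flag;
void* dso_handle = nullptr;

void EnsureLoaded() {
  std::call_once(dso_flag, []() {
    dso_handle = &dso_flag;  // dlopen(...) in the real loaders
    std::cout << "loaded once\n";
  });
}

int main() {
  EnsureLoaded();
  EnsureLoaded();  // second call is a no-op: the flag is already set
}
```
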
diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc
index 8db08edba805e41d33ec6a6a4b338cca0d4906ef..4fc9aae8e36e9b43d65fab0b92ec3a2549057128 100644
--- a/paddle/fluid/platform/cpu_info.cc
+++ b/paddle/fluid/platform/cpu_info.cc
@@ -27,6 +27,11 @@ DEFINE_double(fraction_of_cpu_memory_to_use, 1,
               "Default use 100% of CPU memory for PaddlePaddle,"
               "reserve the rest for page tables, etc");
 
+DEFINE_double(
+    fraction_of_cuda_pinned_memory_to_use, 0.5,
+    "Default use 50% of CPU memory as the pinned_memory for PaddlePaddle,"
+    "reserve the rest for page tables, etc");
+
 namespace paddle {
 namespace platform {
 
@@ -62,5 +67,22 @@ size_t CpuMaxChunkSize() {
   return CpuMaxAllocSize() / 32;
 }
 
+size_t CUDAPinnedMaxAllocSize() {
+  // For distributed systems, it requires configuring and limiting
+  // the fraction of memory to use.
+  return FLAGS_fraction_of_cuda_pinned_memory_to_use * CpuTotalPhysicalMemory();
+}
+
+size_t CUDAPinnedMinChunkSize() {
+  // Allow to allocate the minimum chunk size is 64 KB.
+  return 1 << 16;
+}
+
+size_t CUDAPinnedMaxChunkSize() {
+  // Allow to allocate the maximum chunk size is roughly 1/256 of CUDA_PINNED
+  // memory.
+  return CUDAPinnedMaxAllocSize() / 256;
+}
+
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h
index a930151bd15a33d5b8861c6239e7dd964822f0f6..f06c2b67fe4385f427322e9bb2f3080fdd3acc94 100644
--- a/paddle/fluid/platform/cpu_info.h
+++ b/paddle/fluid/platform/cpu_info.h
@@ -22,11 +22,20 @@ namespace platform {
 //! Get the maximum allocation size for a machine.
 size_t CpuMaxAllocSize();
 
+//! Get the maximum allocation size for a machine.
+size_t CUDAPinnedMaxAllocSize();
+
 //! Get the minimum chunk size for buddy allocator.
 size_t CpuMinChunkSize();
 
 //! Get the maximum chunk size for buddy allocator.
 size_t CpuMaxChunkSize();
 
+//! Get the minimum chunk size for buddy allocator.
+size_t CUDAPinnedMaxChunkSize(); + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc index 78332f90cd96d80cca0cf865f4815aaf18463253..aac882e846309f23f49f68aba805da0857c7fb2d 100644 --- a/paddle/fluid/platform/cpu_info_test.cc +++ b/paddle/fluid/platform/cpu_info_test.cc @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/string/printf.h" #include #include @@ -20,6 +19,7 @@ #include "gflags/gflags.h" #include "glog/logging.h" #include "gtest/gtest.h" +#include "paddle/fluid/string/printf.h" DECLARE_double(fraction_of_cpu_memory_to_use); diff --git a/paddle/fluid/platform/cuda_helper.h b/paddle/fluid/platform/cuda_helper.h index a4ea4f21e3c16c9292cf67863616924e9d9f8aba..8758af0804ae08fec6fa64d7387f197f046ce20e 100644 --- a/paddle/fluid/platform/cuda_helper.h +++ b/paddle/fluid/platform/cuda_helper.h @@ -33,22 +33,26 @@ constexpr int PADDLE_CUDA_NUM_THREADS = 512; USE_CUDA_ATOMIC(Add, float); USE_CUDA_ATOMIC(Add, int); USE_CUDA_ATOMIC(Add, unsigned int); -USE_CUDA_ATOMIC(Add, unsigned long long int); +// CUDA API uses unsigned long long int, we cannot use uint64_t here. +// It because unsigned long long int is not necessarily uint64_t +USE_CUDA_ATOMIC(Add, unsigned long long int); // NOLINT CUDA_ATOMIC_WRAPPER(Add, int64_t) { - static_assert(sizeof(int64_t) == sizeof(long long int), + // Here, we check long long int must be int64_t. + static_assert(sizeof(int64_t) == sizeof(long long int), // NOLINT "long long should be int64"); - return CudaAtomicAdd(reinterpret_cast(address), - static_cast(val)); + return CudaAtomicAdd( + reinterpret_cast(address), // NOLINT + static_cast(val)); // NOLINT } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600 USE_CUDA_ATOMIC(Add, double); #else CUDA_ATOMIC_WRAPPER(Add, double) { - unsigned long long int* address_as_ull = - reinterpret_cast(address); - unsigned long long int old = *address_as_ull, assumed; + unsigned long long int* address_as_ull = // NOLINT + reinterpret_cast(address); // NOLINT + unsigned long long int old = *address_as_ull, assumed; // NOLINT do { assumed = old; @@ -62,53 +66,5 @@ CUDA_ATOMIC_WRAPPER(Add, double) { } #endif -// __shfl_down has been deprecated as of CUDA 9.0. -#if CUDA_VERSION < 9000 -template -__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { - return __shfl_down(val, delta); -} -#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; -#else -#define FULL_WARP_MASK 0xFFFFFFFF -#define CREATE_SHFL_MASK(mask, predicate) \ - mask = __ballot_sync(FULL_WARP_MASK, (predicate)) -#endif - -template -__device__ T reduceSum(T val, int tid, int len) { - // TODO(zcd): The warp size should be taken from the - // parameters of the GPU but not specified as 32 simply. - // To make the reduceSum more efficiently, - // I use Warp-Level Parallelism and assume the Warp size - // is 32 which may be different for different GPU, - // but most card's warp size is 32. 
- __shared__ T shm[32]; - const int warpSize = 32; - unsigned mask = 0u; - CREATE_SHFL_MASK(mask, tid < len); - - for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += __shfl_down_sync(mask, val, offset); - - if (tid < warpSize) shm[tid] = 0; - - __syncthreads(); - - if (tid % warpSize == 0) { - shm[tid / warpSize] = val; - } - - CREATE_SHFL_MASK(mask, tid < warpSize); - - if (tid < warpSize) { - val = shm[tid]; - for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += __shfl_down_sync(mask, val, offset); - } - - return val; -} - } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cuda_profiler.h b/paddle/fluid/platform/cuda_profiler.h index ebd6aebd7688549c6fb14466cfa461b90a9fdde0..41d7c121469edd24c67b4288793cb95159fd4b62 100644 --- a/paddle/fluid/platform/cuda_profiler.h +++ b/paddle/fluid/platform/cuda_profiler.h @@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + #include -#include -#include -#include + +#include + +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 7c604e14eb245232ed92f53a00b9bde45c2fbaec..c0d399d078f73743836fc2a0c1d4b1e6b31ecd83 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -257,9 +257,11 @@ class ScopedConvolutionDescriptor { } #endif + cudnnDataType_t compute_type = + (type == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; PADDLE_ENFORCE(dynload::cudnnSetConvolutionNdDescriptor( desc_, pads.size(), pads.data(), strides.data(), dilations.data(), - CUDNN_CROSS_CORRELATION, type)); + CUDNN_CROSS_CORRELATION, compute_type)); return desc_; } diff --git a/paddle/fluid/platform/details/device_ptr_cast.h b/paddle/fluid/platform/details/cuda_transform_iterator_cast.h similarity index 50% rename from paddle/fluid/platform/details/device_ptr_cast.h rename to paddle/fluid/platform/details/cuda_transform_iterator_cast.h index 1c502a19c056c7fe434e68d568a0f59bf6315b95..06afc44c257bbeb0729323e1a42e1eead23ff075 100644 --- a/paddle/fluid/platform/details/device_ptr_cast.h +++ b/paddle/fluid/platform/details/cuda_transform_iterator_cast.h @@ -18,16 +18,22 @@ limitations under the License. */ #error device_ptr_cast must be include by .cu file #endif -#include +#include // For std::remove_pointer and std::is_pointer. + +#include "thrust/device_ptr.h" namespace paddle { namespace platform { namespace details { + +// PointerToThrustDevicePtr has two speicalizations, one casts a (CUDA +// device) pointer into thrust::device_ptr, the other keeps rest types +// un-casted. template -struct DevicePtrCast; +struct PointerToThrustDevicePtr; template -struct DevicePtrCast { +struct PointerToThrustDevicePtr { using ELEM = typename std::remove_pointer::type; using RTYPE = thrust::device_ptr; @@ -37,17 +43,26 @@ struct DevicePtrCast { }; template -struct DevicePtrCast { +struct PointerToThrustDevicePtr { using RTYPE = T; inline RTYPE operator()(RTYPE it) const { return it; } }; -// Cast T to thrust::device_ptr if T is a pointer. -// Otherwise, e.g., T is a iterator, return T itself. +// CastToCUDATransformIterator casts a pointer to thrust::device_ptr +// so it could be used as the iterator of thrust::transform. 
It +// doesn't cast other types. +// +// We need CastToCUDATransformIterator because it is often that we +// want to use device memory pointers as transform iterators, e.g., to +// transform a block of float32 to float16. In this case, we want +// CastToCUDATransformIterator to cast float16/32 pointers to +// thrust::device_ptr, otherwise they cannot work as the iterator +// required by thrust::transform. At the same time, we don't want to +// cast thrust::device_ptr to thrust::device_ptr repeatedly. template -auto DevPtrCast(T t) -> - typename DevicePtrCast::value>::RTYPE { - DevicePtrCast::value> cast; +auto CastToCUDATransformIterator(T t) -> + typename PointerToThrustDevicePtr::value>::RTYPE { + PointerToThrustDevicePtr::value> cast; return cast(t); } diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 59b76a1edb5ec5900520fbccb6a6f8f6e7a70aa4..1f733d71bdfb777d4a2f316a5fefc3c874879862 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -8,10 +8,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/platform/device_context.h" + +#include #include +#include + #include "paddle/fluid/memory/memory.h" + namespace paddle { namespace platform { @@ -53,6 +57,16 @@ DeviceContextPool::DeviceContextPool( PADDLE_THROW( "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " "option"); +#endif + } else if (platform::is_cuda_pinned_place(p)) { +#ifdef PADDLE_WITH_CUDA + device_contexts_.emplace( + p, + PtrType(new CUDAPinnedDeviceContext(boost::get(p)))); +#else + PADDLE_THROW( + "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " + "option"); #endif } } @@ -161,7 +175,7 @@ CUDADeviceContext::~CUDADeviceContext() { Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { - std::lock_guard guard(mutex_); + std::lock_guard guard(mutex_); PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); } @@ -186,6 +200,20 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; } cudaStream_t CUDADeviceContext::stream() const { return stream_; } +CUDAPinnedDeviceContext::CUDAPinnedDeviceContext() { + eigen_device_.reset(new Eigen::DefaultDevice()); +} + +CUDAPinnedDeviceContext::CUDAPinnedDeviceContext(CUDAPinnedPlace place) + : place_(place) { + eigen_device_.reset(new Eigen::DefaultDevice()); +} + +Eigen::DefaultDevice* CUDAPinnedDeviceContext::eigen_device() const { + return eigen_device_.get(); +} + +Place CUDAPinnedDeviceContext::GetPlace() const { return place_; } #endif #ifdef PADDLE_WITH_MKLDNN diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 202394c7be7e103a609dd0999fc883c794ef0edd..a9c1984616bc731e0557f2cb89282423aa9c3bac 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -8,11 +8,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
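
The device_context.cc hunk above teaches `DeviceContextPool` a third place kind: `CUDAPinnedPlace` now maps to its own `CUDAPinnedDeviceContext`. A toy sketch of that place-to-context mapping; strings stand in for the `boost::variant` place types and `Ctx` for `DeviceContext`, so this is an illustration of the dispatch shape, not Paddle's API.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Ctx { std::string name; };

std::map<std::string, std::unique_ptr<Ctx>> MakePool() {
  std::map<std::string, std::unique_ptr<Ctx>> pool;
  pool["CPUPlace"] = std::make_unique<Ctx>(Ctx{"CPUDeviceContext"});
  pool["CUDAPlace"] = std::make_unique<Ctx>(Ctx{"CUDADeviceContext"});
  // New in this patch: pinned host memory gets a dedicated context.
  pool["CUDAPinnedPlace"] =
      std::make_unique<Ctx>(Ctx{"CUDAPinnedDeviceContext"});
  return pool;
}

int main() {
  auto pool = MakePool();
  std::cout << pool.at("CUDAPinnedPlace")->name << "\n";
}
```
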
*/ - #pragma once #include +#include #include +#include #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/dynload/cublas.h" @@ -97,13 +98,20 @@ class CUDADeviceContext : public DeviceContext { /*! \brief Return cuda stream in the device context. */ cudaStream_t stream() const; + template + void RecordEvent(cudaEvent_t ev, Callback callback) { + std::lock_guard guard(mutex_); + callback(); + PADDLE_ENFORCE(cudaEventRecord(ev, stream_)); + } + private: CUDAPlace place_; std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; - mutable std::mutex mutex_; + mutable std::recursive_mutex mutex_; cudaStream_t stream_; cudnnHandle_t cudnn_handle_; cublasHandle_t cublas_handle_; @@ -118,6 +126,25 @@ struct DefaultDeviceContextType { using TYPE = CUDADeviceContext; }; +// Currently, CUDAPinnedDeviceContext is only used to data copying. +class CUDAPinnedDeviceContext : public DeviceContext { + public: + CUDAPinnedDeviceContext(); + explicit CUDAPinnedDeviceContext(CUDAPinnedPlace place); + + Place GetPlace() const override; + + Eigen::DefaultDevice* eigen_device() const; + + private: + CUDAPinnedPlace place_; + std::unique_ptr eigen_device_; +}; + +template <> +struct DefaultDeviceContextType { + using TYPE = CUDAPinnedDeviceContext; +}; #endif #ifdef PADDLE_WITH_MKLDNN diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu index 9d8d07362ce3a0d0c2a009c9844db0a3bdaf01cb..fa806aba6d8747beebc3eed2c661b326dd62fd76 100644 --- a/paddle/fluid/platform/device_context_test.cu +++ b/paddle/fluid/platform/device_context_test.cu @@ -11,11 +11,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - -#include "gtest/gtest.h" #include "paddle/fluid/platform/device_context.h" +#include + #include "glog/logging.h" +#include "gtest/gtest.h" TEST(Device, Init) { using paddle::platform::DeviceContext; diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index 3b4437f576e1c2e931a86ec6d5e823ec1f344c52..c9e10631680a6ea3876f555a3a6e6c12f79b39d5 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -11,15 +11,19 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
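
The device_context.h hunk above adds `RecordEvent`, which holds the context lock around a user callback, and correspondingly widens `mutex_` from `std::mutex` to `std::recursive_mutex`. A standalone sketch of why: the callback may re-enter another locking member such as `Wait()` on the same context, which self-deadlocks under a plain mutex but succeeds under a recursive one. `FakeCtx` is a hypothetical stand-in for `CUDADeviceContext` with the CUDA calls omitted.

```cpp
#include <iostream>
#include <mutex>

struct FakeCtx {
  mutable std::recursive_mutex mutex_;

  void Wait() const {
    std::lock_guard<std::recursive_mutex> guard(mutex_);  // re-entrant lock
    std::cout << "synchronized\n";
  }

  template <typename Callback>
  void RecordEvent(Callback callback) {
    std::lock_guard<std::recursive_mutex> guard(mutex_);
    callback();  // may call Wait() on the same context while locked
    std::cout << "event recorded\n";
  }
};

int main() {
  FakeCtx ctx;
  ctx.RecordEvent([&] { ctx.Wait(); });  // no deadlock with recursive_mutex
}
```
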
*/ - #include "paddle/fluid/platform/device_tracer.h" -#include + +#include #include #include -#include +#include // NOLINT #include -#include +#include +#include // NOLINT +#include + #include "glog/logging.h" +#include "google/protobuf/text_format.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/string/printf.h" @@ -123,7 +127,7 @@ void DisableActivity() { void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords) { - uint8_t *buf = (uint8_t *)malloc(kBufSize + kAlignSize); + uint8_t *buf = reinterpret_cast(malloc(kBufSize + kAlignSize)); *size = kBufSize; *buffer = ALIGN_BUFFER(buf, kAlignSize); *maxNumRecords = 0; diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h index deb3d23f786353b8e7a2f28d094e364158885a34..0375c7439c29d4122e8ff6b58734dad4f504b7a2 100644 --- a/paddle/fluid/platform/device_tracer.h +++ b/paddle/fluid/platform/device_tracer.h @@ -11,8 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + +#include + #include "paddle/fluid/platform/dynload/cupti.h" #include "paddle/fluid/platform/profiler.pb.h" diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt index 84dac2937de02b3374156ebc83e19dac9f9a3e7a..b93b925a72a55442c105e4280a3580f4ea5b93a1 100644 --- a/paddle/fluid/platform/dynload/CMakeLists.txt +++ b/paddle/fluid/platform/dynload/CMakeLists.txt @@ -1,6 +1,11 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc) +if (WITH_TENSORRT) + list(APPEND CUDA_SRCS tensorrt.cc) +endif() + + configure_file(cupti_lib_path.h.in ${CMAKE_CURRENT_BINARY_DIR}/cupti_lib_path.h) if (CUPTI_FOUND) list(APPEND CUDA_SRCS cupti.cc) diff --git a/paddle/fluid/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc index e90e3105f0809b3c7507a86fa5a3d61864290fcb..361d3439b844e9f68d3fba0a0e41ec457118a4a9 100644 --- a/paddle/fluid/platform/dynload/cublas.cc +++ b/paddle/fluid/platform/dynload/cublas.cc @@ -24,6 +24,14 @@ void *cublas_dso_handle = nullptr; CUBLAS_BLAS_ROUTINE_EACH(DEFINE_WRAP); +#ifdef CUBLAS_BLAS_ROUTINE_EACH_R2 +CUBLAS_BLAS_ROUTINE_EACH_R2(DEFINE_WRAP); +#endif + +#ifdef CUBLAS_BLAS_ROUTINE_EACH_R3 +CUBLAS_BLAS_ROUTINE_EACH_R3(DEFINE_WRAP); +#endif + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index fa9041134d863ebfd8d1e00379da3b92323ae6e3..1ab55d6b9bf8fdbd14c9c2bd978e3e99dba3e73e 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -1,22 +1,23 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
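
The cublas.cc hunk above compiles extra `DEFINE_WRAP` groups only when the corresponding version-gated X-macro list exists. A compact sketch of that pattern under an assumed `CUDA_VERSION_FOR_DEMO` macro (the real code keys off `CUDA_VERSION`), with toy routine names instead of the cuBLAS symbols:

```cpp
#include <iostream>

#define CUDA_VERSION_FOR_DEMO 9000

#define DEMO_ROUTINE_EACH(__macro) __macro(gemm)

// The "R3" group exists only on new enough toolkits.
#if CUDA_VERSION_FOR_DEMO >= 9000
#define DEMO_ROUTINE_EACH_R3(__macro) __macro(set_math_mode)
#endif

#define DEFINE_WRAP(__name) \
  void __name() { std::cout << #__name << " wrapped\n"; }

DEMO_ROUTINE_EACH(DEFINE_WRAP)
#ifdef DEMO_ROUTINE_EACH_R3
DEMO_ROUTINE_EACH_R3(DEFINE_WRAP)  // mirrors the CUBLAS_..._R3 guard
#endif

int main() {
  gemm();
  set_math_mode();  // only defined when the R3 group is compiled in
}
```
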
+ You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #pragma once #include +#include #include -#include +#include // NOLINT #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -34,18 +35,18 @@ extern void *cublas_dso_handle; * note: default dynamic linked libs */ #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - inline cublasStatus_t operator()(Args... args) { \ - typedef cublasStatus_t (*cublasFunc)(Args...); \ - std::call_once(cublas_dso_flag, \ - paddle::platform::dynload::GetCublasDsoHandle, \ - &cublas_dso_handle); \ - void *p_##__name = dlsym(cublas_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + inline cublasStatus_t operator()(Args... args) { \ + typedef cublasStatus_t (*cublasFunc)(Args...); \ + std::call_once(cublas_dso_flag, []() { \ + cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ + }); \ + void *p_##__name = dlsym(cublas_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ @@ -81,17 +82,32 @@ extern void *cublas_dso_handle; __macro(cublasDgemmBatched); \ __macro(cublasCgemmBatched); \ __macro(cublasZgemmBatched); \ - __macro(cublasSgemmStridedBatched); \ - __macro(cublasDgemmStridedBatched); \ - __macro(cublasCgemmStridedBatched); \ - __macro(cublasZgemmStridedBatched); \ - __macro(cublasHgemmStridedBatched); \ __macro(cublasSgetrfBatched); \ __macro(cublasSgetriBatched); \ __macro(cublasDgetrfBatched); \ - __macro(cublasDgetriBatched) + __macro(cublasDgetriBatched); -CUBLAS_BLAS_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP); +CUBLAS_BLAS_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) + +// APIs available after CUDA 8.0 +#if CUDA_VERSION >= 8000 +#define CUBLAS_BLAS_ROUTINE_EACH_R2(__macro) \ + __macro(cublasGemmEx); \ + __macro(cublasSgemmStridedBatched); \ + __macro(cublasDgemmStridedBatched); \ + __macro(cublasCgemmStridedBatched); \ + __macro(cublasZgemmStridedBatched); \ + __macro(cublasHgemmStridedBatched); + +CUBLAS_BLAS_ROUTINE_EACH_R2(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +#endif + +// APIs available after CUDA 9.0 +#if CUDA_VERSION >= 9000 +#define CUBLAS_BLAS_ROUTINE_EACH_R3(__macro) __macro(cublasSetMathMode); + +CUBLAS_BLAS_ROUTINE_EACH_R3(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +#endif #undef DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP } // namespace dynload diff --git a/paddle/fluid/platform/dynload/cudnn.cc b/paddle/fluid/platform/dynload/cudnn.cc index c65b060ab46cfcd38292be66dd5f2123f88bae63..f3cd3b2bbedef7c9140c2acddea0732972ff7fa0 100644 --- a/paddle/fluid/platform/dynload/cudnn.cc +++ 
b/paddle/fluid/platform/dynload/cudnn.cc @@ -44,7 +44,8 @@ CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP); #ifdef PADDLE_USE_DSO bool HasCUDNN() { - std::call_once(cudnn_dso_flag, GetCUDNNDsoHandle, &cudnn_dso_handle); + std::call_once(cudnn_dso_flag, + []() { cudnn_dso_handle = GetCUDNNDsoHandle(); }); return cudnn_dso_handle != nullptr; } diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 81acc445bd3803dede158ff09507a72fb6e293ac..24475b62ca2825c45ff7edb39328dece3b822b25 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include +#include // NOLINT #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -30,19 +30,19 @@ extern bool HasCUDNN(); #ifdef PADDLE_USE_DSO extern void EnforceCUDNNLoaded(const char* fn_name); -#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using cudnn_func = decltype(__name(args...)) (*)(Args...); \ - std::call_once(cudnn_dso_flag, \ - paddle::platform::dynload::GetCUDNNDsoHandle, \ - &cudnn_dso_handle); \ - EnforceCUDNNLoaded(#__name); \ - void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using cudnn_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(cudnn_dso_flag, []() { \ + cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ + }); \ + EnforceCUDNNLoaded(#__name); \ + void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern struct DynLoad__##__name __name #else @@ -140,7 +140,8 @@ CUDNN_DNN_ROUTINE_EACH_R5(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #if CUDNN_VERSION >= 7001 #define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \ - __macro(cudnnSetConvolutionGroupCount); + __macro(cudnnSetConvolutionGroupCount); \ + __macro(cudnnSetConvolutionMathType); CUDNN_DNN_ROUTINE_EACH_R7(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #endif diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index c1bf88f8cb690861b97686d99d36410143445243..d0d676b9d8ac462900b48246bec43166d04ef97b 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -11,14 +11,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #ifdef PADDLE_WITH_CUPTI + #include #include #include -#include +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -36,18 +37,18 @@ extern void *cupti_dso_handle; * note: default dynamic linked libs */ #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - inline CUptiResult CUPTIAPI operator()(Args... 
args) { \ - typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ - std::call_once(cupti_dso_flag, \ - paddle::platform::dynload::GetCUPTIDsoHandle, \ - &cupti_dso_handle); \ - void *p_##__name = dlsym(cupti_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + inline CUptiResult CUPTIAPI operator()(Args... args) { \ + typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ + std::call_once(cupti_dso_flag, []() { \ + cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ + }); \ + void *p_##__name = dlsym(cupti_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 1b3ff962d6edceb37deb94cc7daead7346d25352..4697fb6cd96770127206bdabeea77e43eb09d1f5 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include #include -#include + +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -25,18 +26,18 @@ namespace dynload { extern std::once_flag curand_dso_flag; extern void *curand_dso_handle; #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - curandStatus_t operator()(Args... args) { \ - typedef curandStatus_t (*curandFunc)(Args...); \ - std::call_once(curand_dso_flag, \ - paddle::platform::dynload::GetCurandDsoHandle, \ - &curand_dso_handle); \ - void *p_##__name = dlsym(curand_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + curandStatus_t operator()(Args... args) { \ + typedef curandStatus_t (*curandFunc)(Args...); \ + std::call_once(curand_dso_flag, []() { \ + curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ + }); \ + void *p_##__name = dlsym(curand_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index e590e81bab51fd9fe12309335522614263d8e21d..19c01dc5a968c7e1d2b0f15cf9a0e8427004e58b 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -11,12 +11,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
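
The cudnn/cupti/curand macros above all switch to the same shape: the generated functor resolves its symbol once via `std::call_once` with a lambda, then forwards the call. A hand-expanded sketch of what one such `DECLARE_DYNAMIC_LOAD_*_WRAP(foo)` roughly produces; `RealFoo` stands in for the dlsym-resolved function, and the handle juggling is simplified.

```cpp
#include <iostream>
#include <mutex>

int RealFoo(int x) { return x + 1; }  // pretend this lives in the .so

std::once_flag curand_dso_flag;
void* curand_dso_handle = nullptr;

struct DynLoad__foo {
  template <typename... Args>
  int operator()(Args... args) {
    typedef int (*FooFunc)(Args...);
    std::call_once(curand_dso_flag, []() {
      // dlopen(...) in the real macro; here we just stash a pointer.
      curand_dso_handle = reinterpret_cast<void*>(&RealFoo);
    });
    // dlsym(curand_dso_handle, "foo") in the real macro:
    FooFunc f = reinterpret_cast<FooFunc>(curand_dso_handle);
    return f(args...);
  }
};
DynLoad__foo foo;

int main() { std::cout << foo(41) << "\n"; }  // prints 42
```
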
*/ - #include "paddle/fluid/platform/dynload/dynamic_loader.h" + #include + #include -#include +#include // NOLINT #include + #include "gflags/gflags.h" #include "glog/logging.h" #include "paddle/fluid/platform/dynload/cupti_lib_path.h" @@ -43,6 +45,10 @@ DEFINE_string(nccl_dir, "", DEFINE_string(cupti_dir, "", "Specify path for loading cupti.so."); +DEFINE_string( + tensorrt_dir, "", + "Specify path for loading tensorrt library, such as libnvinfer.so."); + namespace paddle { namespace platform { namespace dynload { @@ -65,22 +71,21 @@ static inline std::string join(const std::string& part1, return ret; } -static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, - void** dso_handle, - int dynload_flags) { +static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path, + int dynload_flags) { VLOG(3) << "Try to find library: " << dso_path << " from default system path."; // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH - *dso_handle = dlopen(dso_path.c_str(), dynload_flags); + void* dso_handle = dlopen(dso_path.c_str(), dynload_flags); // DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to // bring System Integrity Projection (SIP), if dso_handle // is null, search from default package path in Mac OS. #if defined(__APPLE__) || defined(__OSX__) - if (nullptr == *dso_handle) { - dso_path = join("/usr/local/cuda/lib/", dso_path); - *dso_handle = dlopen(dso_path.c_str(), dynload_flags); - if (nullptr == *dso_handle) { + if (nullptr == dso_handle) { + dso_handle = + dlopen(join("/usr/local/cuda/lib/", dso_path).c_str(), dynload_flags); + if (nullptr == dso_handle) { if (dso_path == "libcudnn.dylib") { LOG(WARNING) << "Note: [Recommend] copy cudnn into /usr/local/cuda/ \n " "For instance, sudo tar -xzf " @@ -91,28 +96,29 @@ static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, } } #endif + + return dso_handle; } -static inline void GetDsoHandleFromSearchPath(const std::string& search_root, - const std::string& dso_name, - void** dso_handle, - bool throw_on_error = true) { +static inline void* GetDsoHandleFromSearchPath(const std::string& search_root, + const std::string& dso_name, + bool throw_on_error = true) { int dynload_flags = RTLD_LAZY | RTLD_LOCAL; - *dso_handle = nullptr; + void* dso_handle = nullptr; std::string dlPath = dso_name; if (search_root.empty()) { - GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags); + dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags); } else { // search xxx.so from custom path dlPath = join(search_root, dso_name); - *dso_handle = dlopen(dlPath.c_str(), dynload_flags); + dso_handle = dlopen(dlPath.c_str(), dynload_flags); // if not found, search from default path - if (nullptr == *dso_handle) { + if (nullptr == dso_handle) { LOG(WARNING) << "Failed to find dynamic library: " << dlPath << " (" << dlerror() << ")"; dlPath = dso_name; - GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags); + dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags); } } auto error_msg = @@ -124,70 +130,79 @@ static inline void GetDsoHandleFromSearchPath(const std::string& search_root, "using the DYLD_LIBRARY_PATH is impossible unless System " "Integrity Protection (SIP) is disabled."; if (throw_on_error) { - PADDLE_ENFORCE(nullptr != *dso_handle, error_msg, dlPath, dlerror()); - } else if (nullptr == *dso_handle) { + PADDLE_ENFORCE(nullptr != dso_handle, error_msg, dlPath, dlerror()); + } else if (nullptr == dso_handle) { LOG(WARNING) << string::Sprintf(error_msg, dlPath, dlerror()); } + + 
return dso_handle; } -void GetCublasDsoHandle(void** dso_handle) { +void* GetCublasDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so"); #endif } -void GetCUDNNDsoHandle(void** dso_handle) { +void* GetCUDNNDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle, - false); + return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", false); #else - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle, false); + return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", false); #endif } -void GetCUPTIDsoHandle(void** dso_handle) { +void* GetCUPTIDsoHandle() { std::string cupti_path = cupti_lib_path; if (!FLAGS_cupti_dir.empty()) { cupti_path = FLAGS_cupti_dir; } #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(cupti_path, "libcupti.dylib", dso_handle, false); + return GetDsoHandleFromSearchPath(cupti_path, "libcupti.dylib", false); +#else + return GetDsoHandleFromSearchPath(cupti_path, "libcupti.so", false); +#endif +} + +void* GetCurandDsoHandle() { +#if defined(__APPLE__) || defined(__OSX__) + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib"); #else - GetDsoHandleFromSearchPath(cupti_path, "libcupti.so", dso_handle, false); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so"); #endif } -void GetCurandDsoHandle(void** dso_handle) { +void* GetWarpCTCDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.so"); #endif } -void GetWarpCTCDsoHandle(void** dso_handle) { +void* GetLapackDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so"); #endif } -void GetLapackDsoHandle(void** dso_handle) { +void* GetNCCLDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.so"); #endif } -void GetNCCLDsoHandle(void** dso_handle) { +void* GetTensorRtDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_tensorrt_dir, "libnvinfer.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_tensorrt_dir, "libnvinfer.so"); #endif } diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h index 
b5b9c4af916241c1c7361b506f74563ebcf69b9a..0de3559b6088086cb52c254535b6ec42da7dd724 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.h +++ b/paddle/fluid/platform/dynload/dynamic_loader.h @@ -18,55 +18,14 @@ namespace paddle { namespace platform { namespace dynload { -/** - * @brief load the DSO of CUBLAS - * - * @param **dso_handle dso handler - * - */ -void GetCublasDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of CUDNN - * - * @param **dso_handle dso handler - * - */ -void GetCUDNNDsoHandle(void** dso_handle); - -void GetCUPTIDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of CURAND - * - * @param **dso_handle dso handler - * - */ -void GetCurandDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of warp-ctc - * - * @param **dso_handle dso handler - * - */ -void GetWarpCTCDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of lapack - * - * @param **dso_handle dso handler - * - */ -void GetLapackDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of NVIDIA nccl - * - * @param **dso_handle dso handler - * - */ -void GetNCCLDsoHandle(void** dso_handle); +void* GetCublasDsoHandle(); +void* GetCUDNNDsoHandle(); +void* GetCUPTIDsoHandle(); +void* GetCurandDsoHandle(); +void* GetWarpCTCDsoHandle(); +void* GetLapackDsoHandle(); +void* GetNCCLDsoHandle(); +void* GetTensorRtDsoHandle(); } // namespace dynload } // namespace platform diff --git a/paddle/fluid/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc index 3edc70c46d03ddcc751e865676928c47fcb48e69..2c40c48ee08497f9a2a414687b9c51d87ba574aa 100644 --- a/paddle/fluid/platform/dynload/nccl.cc +++ b/paddle/fluid/platform/dynload/nccl.cc @@ -25,11 +25,6 @@ void *nccl_dso_handle; NCCL_RAND_ROUTINE_EACH(DEFINE_WRAP); -void LoadNCCLDSO() { - platform::call_once(nccl_dso_flag, - [] { GetNCCLDsoHandle(&nccl_dso_handle); }); -} - } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index dc78bcb44d3316a1ecee0c8d70dcb4777a9e2de4..c5a10a78a4f432b431680c089f255fea777277cb 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -11,13 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include #include -#include -#include "paddle/fluid/platform/call_once.h" + +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -28,18 +28,19 @@ extern std::once_flag nccl_dso_flag; extern void* nccl_dso_handle; #ifdef PADDLE_USE_DSO -extern void LoadNCCLDSO(); -#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ - paddle::platform::dynload::LoadNCCLDSO(); \ - void* p_##__name = dlsym(nccl_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... 
args) -> decltype(__name(args...)) { \ + using nccl_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(nccl_dso_flag, []() { \ + nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ + }); \ + void* p_##__name = dlsym(nccl_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/tensorrt.cc b/paddle/fluid/platform/dynload/tensorrt.cc new file mode 100644 index 0000000000000000000000000000000000000000..f3c8e27944ca9b6419de87d752df3a83751039b1 --- /dev/null +++ b/paddle/fluid/platform/dynload/tensorrt.cc @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/fluid/platform/dynload/tensorrt.h" + +namespace paddle { +namespace platform { +namespace dynload { + +std::once_flag tensorrt_dso_flag; +void *tensorrt_dso_handle; + +#define DEFINE_WRAP(__name) DynLoad__##__name __name + +TENSORRT_RAND_ROUTINE_EACH(DEFINE_WRAP); + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/dynload/tensorrt.h b/paddle/fluid/platform/dynload/tensorrt.h new file mode 100644 index 0000000000000000000000000000000000000000..f584a49da0fefe0b064b5fb55b01ec132225ce5e --- /dev/null +++ b/paddle/fluid/platform/dynload/tensorrt.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include +#include + +#include // NOLINT + +#include "paddle/fluid/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace platform { +namespace dynload { + +extern std::once_flag tensorrt_dso_flag; +extern void* tensorrt_dso_handle; + +#ifdef PADDLE_USE_DSO + +#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... 
args) -> decltype(__name(args...)) { \ + using tensorrt_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(tensorrt_dso_flag, []() { \ + tensorrt_dso_handle = \ + paddle::platform::dynload::GetTensorRtDsoHandle(); \ + PADDLE_ENFORCE(tensorrt_dso_handle, "load tensorrt so failed"); \ + }); \ + void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ + PADDLE_ENFORCE(p_##__name, "load %s failed", #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ + extern DynLoad__##__name __name + +#else +#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + tensorrtResult_t operator()(Args... args) { \ + return __name(args...); \ + } \ + }; \ + extern DynLoad__##__name __name +#endif + +#define TENSORRT_RAND_ROUTINE_EACH(__macro) \ + __macro(createInferBuilder_INTERNAL); \ + __macro(createInferRuntime_INTERNAL); + +TENSORRT_RAND_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP) + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index f5ded0eb6b1107c886641e848f5040a7a2d806a5..7fa468370463a51c486b80317f401612930bc72e 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -15,9 +15,10 @@ limitations under the License. */ #pragma once #include -#include -#include "ctc.h" +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" +#include "warpctc/include/ctc.h" namespace paddle { namespace platform { @@ -31,18 +32,18 @@ extern void* warpctc_dso_handle; * (for each function) to dynamic load warpctc routine * via operator overloading. */ -#define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ - std::call_once(warpctc_dso_flag, \ - paddle::platform::dynload::GetWarpCTCDsoHandle, \ - &warpctc_dso_handle); \ - void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ - return reinterpret_cast(p_##_name)(args...); \ - } \ - }; \ +#define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ + std::call_once(warpctc_dso_flag, []() { \ + warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ + }); \ + void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ + return reinterpret_cast(p_##_name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #define DECLARE_DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index d303fd6d63f8424c1c88a31eb3fa6f2136e0e430..7b8c29e1e642ec6bb4023afd8c083311b8b31812 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -16,35 +16,35 @@ limitations under the License. 
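
The TensorRT wrapper above differs from the older dynload macros in that it enforces both the library handle and each resolved symbol. A runnable sketch of that load-once-then-enforce flow using real `dlopen`/`dlsym`; `libm.so.6`/`cos` are used only so the sketch runs on a stock Linux box (link with -ldl), and the exceptions stand in for `PADDLE_ENFORCE`.

```cpp
#include <dlfcn.h>

#include <iostream>
#include <mutex>
#include <stdexcept>

std::once_flag trt_flag;
void* trt_handle = nullptr;

double CallCos(double x) {
  std::call_once(trt_flag, []() {
    trt_handle = dlopen("libm.so.6", RTLD_LAZY | RTLD_LOCAL);
    if (!trt_handle) throw std::runtime_error("load tensorrt so failed");
  });
  void* sym = dlsym(trt_handle, "cos");  // enforced per symbol
  if (!sym) throw std::runtime_error("load cos failed");
  using Fn = double (*)(double);
  return reinterpret_cast<Fn>(sym)(x);
}

int main() { std::cout << CallCos(0.0) << "\n"; }  // prints 1
```
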
*/ #include // for dladdr #include // for backtrace + +#ifdef __GNUC__ +#include // for __cxa_demangle +#endif // __GNUC__ + +#ifdef PADDLE_WITH_CUDA +#include +#include +#include +#include +#include +#endif // PADDLE_WITH_CUDA + #include #include #include #include #include +#include "glog/logging.h" #include "paddle/fluid/platform/macros.h" #include "paddle/fluid/string/printf.h" #include "paddle/fluid/string/to_string.h" -#ifdef __GNUC__ -#include // for __cxa_demangle -#endif - -#include - #ifdef PADDLE_WITH_CUDA - #include "paddle/fluid/platform/dynload/cublas.h" #include "paddle/fluid/platform/dynload/cudnn.h" #include "paddle/fluid/platform/dynload/curand.h" #include "paddle/fluid/platform/dynload/nccl.h" - -#include -#include -#include -#include -#include - #endif namespace paddle { @@ -185,7 +185,7 @@ inline typename std::enable_if::type throw_on_error( } } -#endif // PADDLE_ONLY_CPU +#endif // PADDLE_WITH_CUDA template inline void throw_on_error(T e) { diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index bb9a3543ff267dadf3dfee260a320d292a1ba3cb..57d751cc00b5f11f1ba1a3b0c9a6b7ce9e79f586 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -96,7 +96,6 @@ TEST(ENFORCE_GT, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GT(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -115,7 +114,6 @@ TEST(ENFORCE_GE, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GE(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -135,7 +133,6 @@ TEST(ENFORCE_LE, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GT(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -171,7 +168,6 @@ TEST(ENFORCE_NOT_NULL, FAIL) { try { int* a = nullptr; PADDLE_ENFORCE_NOT_NULL(a); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE(HasPrefix(StringPiece(error.what()), "a should not be null")); diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index 2cf311c7e56a9bbb0bdb0078d5cfefb4bb50018b..ffd183af68514dbb1a8b3de39000c9ca3f56ddc3 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -15,6 +15,7 @@ limitations under the License. 
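
The float16.h changes that follow are another cpplint-driven cleanup: functional-style casts such as `float(*this)` become `static_cast<float>(*this)`. A toy sketch of the pattern; `Half` is a hypothetical stand-in for `platform::float16` with the real bit-level conversion omitted.

```cpp
#include <cstdint>
#include <iostream>

struct Half {
  uint16_t x = 0;
  explicit operator float() const { return x / 256.0f; }  // toy decode
  // After the change: narrower conversions route through the explicit
  // float conversion via static_cast, never float(*this).
  explicit operator int32_t() const {
    return static_cast<int32_t>(static_cast<float>(*this));
  }
};

int main() {
  Half h{512};  // decodes to 2.0f in this toy encoding
  std::cout << static_cast<int32_t>(h) << "\n";  // prints 2
}
```
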
*/ #pragma once #include +#include #ifdef PADDLE_WITH_CUDA #include @@ -293,39 +294,39 @@ struct PADDLE_ALIGN(2) float16 { HOSTDEVICE inline explicit operator bool() const { return (x & 0x7fff) != 0; } HOSTDEVICE inline explicit operator int8_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint8_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int16_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint16_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int32_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint32_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int64_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint64_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator double() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } private: @@ -370,7 +371,7 @@ DEVICE inline half operator+(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hadd(a, b); #else - float res = float(float16(a)) + float(float16(b)); + float res = static_cast(float16(a)) + static_cast(float16(b)); return half(float16(res)); #endif } @@ -379,7 +380,7 @@ DEVICE inline half operator-(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hsub(a, b); #else - float res = float(float16(a)) - float(float16(b)); + float res = static_cast(float16(a)) - static_cast(float16(b)); return half(float16(res)); #endif } @@ -388,7 +389,7 @@ DEVICE inline half operator*(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hmul(a, b); #else - float res = float(float16(a)) * float(float16(b)); + float res = static_cast(float16(a)) * static_cast(float16(b)); return half(float16(res)); #endif } @@ -399,7 +400,7 @@ DEVICE inline half operator/(const half& a, const half& b) { float denom = __half2float(b); return __float2half(num / denom); #else - float res = float(float16(a)) / float(float16(b)); + float res = static_cast(float16(a)) / static_cast(float16(b)); return half(float16(res)); #endif } @@ -408,27 +409,27 @@ DEVICE inline half operator-(const half& a) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hneg(a); #else - float res = -float(float16(a)); + float res = -static_cast(float16(a)); return half(float16(res)); #endif } -DEVICE inline half& operator+=(half& a, const half& b) { +DEVICE inline half& operator+=(half& a, const half& b) { // NOLINT a = a + b; return a; } -DEVICE inline half& operator-=(half& a, const half& b) { +DEVICE inline half& operator-=(half& a, const half& b) { // NOLINT a = a - b; return a; } -DEVICE inline half& operator*=(half& a, const half& b) { +DEVICE inline half& operator*=(half& a, const half& b) { // NOLINT a = a * b; return a; } -DEVICE inline half& operator/=(half& a, const half& b) { +DEVICE inline half& operator/=(half& a, const half& b) { // NOLINT a = a / b; return a; } @@ -437,7 +438,7 @@ DEVICE inline bool 
operator==(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __heq(a, b); #else - return float(float16(a)) == float(float16(b)); + return static_cast(float16(a)) == static_cast(float16(b)); #endif } @@ -445,7 +446,7 @@ DEVICE inline bool operator!=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hne(a, b); #else - return float(float16(a)) != float(float16(b)); + return static_cast(float16(a)) != static_cast(float16(b)); #endif } @@ -453,7 +454,7 @@ DEVICE inline bool operator<(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hlt(a, b); #else - return float(float16(a)) < float(float16(b)); + return static_cast(float16(a)) < static_cast(float16(b)); #endif } @@ -461,7 +462,7 @@ DEVICE inline bool operator<=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hle(a, b); #else - return float(float16(a)) <= float(float16(b)); + return static_cast(float16(a)) <= static_cast(float16(b)); #endif } @@ -469,7 +470,7 @@ DEVICE inline bool operator>(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hgt(a, b); #else - return float(float16(a)) > float(float16(b)); + return static_cast(float16(a)) > static_cast(float16(b)); #endif } @@ -477,7 +478,7 @@ DEVICE inline bool operator>=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hge(a, b); #else - return float(float16(a)) >= float(float16(b)); + return static_cast(float16(a)) >= static_cast(float16(b)); #endif } @@ -489,7 +490,7 @@ HOSTDEVICE inline float16 operator+(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hadd(half(a), half(b))); #else - return float16(float(a) + float(b)); + return float16(static_cast(a) + static_cast(b)); #endif } @@ -497,7 +498,7 @@ HOSTDEVICE inline float16 operator-(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hsub(half(a), half(b))); #else - return float16(float(a) - float(b)); + return float16(static_cast(a) - static_cast(b)); #endif } @@ -505,7 +506,7 @@ HOSTDEVICE inline float16 operator*(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hmul(half(a), half(b))); #else - return float16(float(a) * float(b)); + return float16(static_cast(a) * static_cast(b)); #endif } @@ -516,7 +517,7 @@ HOSTDEVICE inline float16 operator/(const float16& a, const float16& b) { float denom = __half2float(half(b)); return float16(num / denom); #else - return float16(float(a) / float(b)); + return float16(static_cast(a) / static_cast(b)); #endif } @@ -530,22 +531,22 @@ HOSTDEVICE inline float16 operator-(const float16& a) { #endif } -HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { // NOLINT a = a + b; return a; } -HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { // NOLINT a = a - b; return a; } -HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { // NOLINT a = a * b; return a; } -HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { // NOLINT a = a / b; return a; } @@ 
-554,7 +555,7 @@ HOSTDEVICE inline bool operator==(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __heq(half(a), half(b)); #else - return float(a) == float(b); + return static_cast(a) == static_cast(b); #endif } @@ -562,7 +563,7 @@ HOSTDEVICE inline bool operator!=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hne(half(a), half(b)); #else - return float(a) != float(b); + return static_cast(a) != static_cast(b); #endif } @@ -570,7 +571,7 @@ HOSTDEVICE inline bool operator<(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hlt(half(a), half(b)); #else - return float(a) < float(b); + return static_cast(a) < static_cast(b); #endif } @@ -578,7 +579,7 @@ HOSTDEVICE inline bool operator<=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hle(half(a), half(b)); #else - return float(a) <= float(b); + return static_cast(a) <= static_cast(b); #endif } @@ -586,7 +587,7 @@ HOSTDEVICE inline bool operator>(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hgt(half(a), half(b)); #else - return float(a) > float(b); + return static_cast(a) > static_cast(b); #endif } @@ -594,7 +595,7 @@ HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hge(half(a), half(b)); #else - return float(a) >= float(b); + return static_cast(a) >= static_cast(b); #endif } @@ -679,22 +680,22 @@ inline float16 operator-(const float16& a) { return res; } -inline float16& operator+=(float16& a, const float16& b) { +inline float16& operator+=(float16& a, const float16& b) { // NOLINT a = a + b; return a; } -inline float16& operator-=(float16& a, const float16& b) { +inline float16& operator-=(float16& a, const float16& b) { // NOLINT a = a - b; return a; } -inline float16& operator*=(float16& a, const float16& b) { +inline float16& operator*=(float16& a, const float16& b) { // NOLINT a = a * b; return a; } -inline float16& operator/=(float16& a, const float16& b) { +inline float16& operator/=(float16& a, const float16& b) { // NOLINT a = a / b; return a; } @@ -784,19 +785,19 @@ inline bool operator>=(const float16& a, const float16& b) { // Arithmetic operators for float16, software emulated on other CPU #else inline float16 operator+(const float16& a, const float16& b) { - return float16(float(a) + float(b)); + return float16(static_cast(a) + static_cast(b)); } inline float16 operator-(const float16& a, const float16& b) { - return float16(float(a) - float(b)); + return float16(static_cast(a) - static_cast(b)); } inline float16 operator*(const float16& a, const float16& b) { - return float16(float(a) * float(b)); + return float16(static_cast(a) * static_cast(b)); } inline float16 operator/(const float16& a, const float16& b) { - return float16(float(a) / float(b)); + return float16(static_cast(a) / static_cast(b)); } inline float16 operator-(const float16& a) { @@ -805,51 +806,57 @@ inline float16 operator-(const float16& a) { return res; } -inline float16& operator+=(float16& a, const float16& b) { - a = float16(float(a) + float(b)); +inline float16& operator+=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) + static_cast(b)); return a; } -inline float16& operator-=(float16& a, const float16& b) { - a = float16(float(a) - float(b)); +inline float16& operator-=(float16& a, const float16& b) { 
// NOLINT + a = float16(static_cast(a) - static_cast(b)); return a; } -inline float16& operator*=(float16& a, const float16& b) { - a = float16(float(a) * float(b)); +inline float16& operator*=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) * static_cast(b)); return a; } -inline float16& operator/=(float16& a, const float16& b) { - a = float16(float(a) / float(b)); +inline float16& operator/=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) / static_cast(b)); return a; } inline bool operator==(const float16& a, const float16& b) { - return float(a) == float(b); + return static_cast(a) == static_cast(b); } inline bool operator!=(const float16& a, const float16& b) { - return float(a) != float(b); + return static_cast(a) != static_cast(b); } inline bool operator<(const float16& a, const float16& b) { - return float(a) < float(b); + return static_cast(a) < static_cast(b); } inline bool operator<=(const float16& a, const float16& b) { - return float(a) <= float(b); + return static_cast(a) <= static_cast(b); } inline bool operator>(const float16& a, const float16& b) { - return float(a) > float(b); + return static_cast(a) > static_cast(b); } inline bool operator>=(const float16& a, const float16& b) { - return float(a) >= float(b); + return static_cast(a) >= static_cast(b); } #endif +HOSTDEVICE inline float16 raw_uint16_to_float16(uint16_t a) { + float16 res; + res.x = a; + return res; +} + HOSTDEVICE inline bool(isnan)(const float16& a) { #if defined(PADDLE_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hisnan(half(a)); @@ -866,6 +873,11 @@ HOSTDEVICE inline bool(isfinite)(const float16& a) { return !((isnan)(a)) && !((isinf)(a)); } +inline std::ostream& operator<<(std::ostream& os, const float16& a) { + os << static_cast(a); + return os; +} + } // namespace platform } // namespace paddle @@ -886,28 +898,156 @@ struct is_pod { is_standard_layout::value; }; +template <> +struct numeric_limits { + static const bool is_specialized = true; + static const bool is_signed = true; + static const bool is_integer = false; + static const bool is_exact = false; + static const bool has_infinity = true; + static const bool has_quiet_NaN = true; + static const bool has_signaling_NaN = true; + static const float_denorm_style has_denorm = denorm_present; + static const bool has_denorm_loss = false; + static const std::float_round_style round_style = std::round_to_nearest; + static const bool is_iec559 = false; + static const bool is_bounded = false; + static const bool is_modulo = false; + static const int digits = 11; + static const int digits10 = 3; + static const int max_digits10 = 5; + static const int radix = 2; + static const int min_exponent = -13; + static const int min_exponent10 = -4; + static const int max_exponent = 16; + static const int max_exponent10 = 4; + static const bool traps = true; + static const bool tinyness_before = false; + + static paddle::platform::float16(min)() { + return paddle::platform::raw_uint16_to_float16(0x400); + } + static paddle::platform::float16 lowest() { + return paddle::platform::raw_uint16_to_float16(0xfbff); + } + static paddle::platform::float16(max)() { + return paddle::platform::raw_uint16_to_float16(0x7bff); + } + static paddle::platform::float16 epsilon() { + return paddle::platform::raw_uint16_to_float16(0x0800); + } + static paddle::platform::float16 round_error() { + return paddle::platform::float16(0.5); + } + static paddle::platform::float16 infinity() { + return 
paddle::platform::raw_uint16_to_float16(0x7c00); + } + static paddle::platform::float16 quiet_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7e00); + } + static paddle::platform::float16 signaling_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7e00); + } + static paddle::platform::float16 denorm_min() { + return paddle::platform::raw_uint16_to_float16(0x1); + } +}; + } // namespace std namespace Eigen { + +using float16 = paddle::platform::float16; + +template <> +struct NumTraits : GenericNumTraits { + enum { + IsSigned = true, + IsInteger = false, + IsComplex = false, + RequireInitialization = false + }; + + HOSTDEVICE static inline float16 epsilon() { + return paddle::platform::raw_uint16_to_float16(0x0800); + } + HOSTDEVICE static inline float16 dummy_precision() { return float16(1e-2f); } + HOSTDEVICE static inline float16 highest() { + return paddle::platform::raw_uint16_to_float16(0x7bff); + } + HOSTDEVICE static inline float16 lowest() { + return paddle::platform::raw_uint16_to_float16(0xfbff); + } + HOSTDEVICE static inline float16 infinity() { + return paddle::platform::raw_uint16_to_float16(0x7c00); + } + HOSTDEVICE static inline float16 quiet_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7c01); + } +}; + namespace numext { template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isnan)(const float16& a) { return (paddle::platform::isnan)(a); } template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isinf)(const float16& a) { return (paddle::platform::isinf)(a); } template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isfinite)(const float16& a) { return (paddle::platform::isfinite)(a); } +template <> +HOSTDEVICE inline float16 exp(const float16& a) { + return float16(::expf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 log(const float16& a) { + return float16(::logf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 tanh(const float16& a) { + return float16(::tanhf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 sqrt(const float16& a) { + return float16(::sqrtf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 ceil(const float16& a) { + return float16(::ceilf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 floor(const float16& a) { + return float16(::floorf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 round(const float16& a) { + return float16(::roundf(static_cast(a))); +} + +template <> +HOSTDEVICE inline float16 pow(const float16& a, const float16& b) { + return float16(::powf(static_cast(a), static_cast(b))); +} + +template <> +HOSTDEVICE inline float16 abs(const float16& a) { + return float16(::fabs(static_cast(a))); +} + } // namespace numext + } // namespace Eigen diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index b716ad9df41330bd6e22937381d24e33fa3a7914..a589e32b61a9b6a44bdc4529eee715d987d6922c 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -8,13 +8,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
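// --- Editor's example (sketch, not part of the patch) ----------------------
// raw_uint16_to_float16 above reinterprets a raw IEEE fp16 bit pattern; that
// is how the std::numeric_limits<float16> and Eigen::NumTraits entries are
// built: 0x7c00 is +infinity, 0x7bff the largest finite value (65504), and
// 0xfbff its negation. A small usage sketch of the new helpers:
void Float16LimitsSketch() {
  using paddle::platform::float16;
  using paddle::platform::raw_uint16_to_float16;

  float16 inf = raw_uint16_to_float16(0x7c00);
  float16 max_val = std::numeric_limits<float16>::max();  // 65504
  bool sane = (paddle::platform::isinf)(inf) &&
              (paddle::platform::isfinite)(max_val);
  std::cout << max_val << " " << sane << std::endl;  // operator<< added above
}
// ----------------------------------------------------------------------------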
*/ - #include "paddle/fluid/platform/float16.h" + +#include + +#include "gtest/gtest.h" #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" -#include - namespace paddle { namespace platform { @@ -74,24 +75,27 @@ TEST(float16, conversion_cpu) { // Conversion operator EXPECT_EQ(Eigen::half(float16(1.0f)).x, 0x3c00); - EXPECT_EQ(float(float16(0.5f)), 0.5f); - EXPECT_NEAR(double(float16(0.33333)), 0.33333, 0.0001); - EXPECT_EQ(int(float16(-1)), -1); - EXPECT_EQ(bool(float16(true)), true); + EXPECT_EQ(static_cast(float16(0.5f)), 0.5f); + EXPECT_NEAR(static_cast(float16(0.33333)), 0.33333, 0.0001); + EXPECT_EQ(static_cast(float16(-1)), -1); + EXPECT_EQ(static_cast(float16(true)), true); } TEST(float16, arithmetic_cpu) { - EXPECT_EQ(float(float16(1) + float16(1)), 2); - EXPECT_EQ(float(float16(5) + float16(-5)), 0); - EXPECT_NEAR(float(float16(0.33333f) + float16(0.66667f)), 1.0f, 0.001); - EXPECT_EQ(float(float16(3) - float16(5)), -2); - EXPECT_NEAR(float(float16(0.66667f) - float16(0.33333f)), 0.33334f, 0.001); - EXPECT_NEAR(float(float16(3.3f) * float16(2.0f)), 6.6f, 0.01); - EXPECT_NEAR(float(float16(-2.1f) * float16(-3.0f)), 6.3f, 0.01); - EXPECT_NEAR(float(float16(2.0f) / float16(3.0f)), 0.66667f, 0.001); - EXPECT_EQ(float(float16(1.0f) / float16(2.0f)), 0.5f); - EXPECT_EQ(float(-float16(512.0f)), -512.0f); - EXPECT_EQ(float(-float16(-512.0f)), 512.0f); + EXPECT_EQ(static_cast(float16(1) + float16(1)), 2); + EXPECT_EQ(static_cast(float16(5) + float16(-5)), 0); + EXPECT_NEAR(static_cast(float16(0.33333f) + float16(0.66667f)), 1.0f, + 0.001); + EXPECT_EQ(static_cast(float16(3) - float16(5)), -2); + EXPECT_NEAR(static_cast(float16(0.66667f) - float16(0.33333f)), + 0.33334f, 0.001); + EXPECT_NEAR(static_cast(float16(3.3f) * float16(2.0f)), 6.6f, 0.01); + EXPECT_NEAR(static_cast(float16(-2.1f) * float16(-3.0f)), 6.3f, 0.01); + EXPECT_NEAR(static_cast(float16(2.0f) / float16(3.0f)), 0.66667f, + 0.001); + EXPECT_EQ(static_cast(float16(1.0f) / float16(2.0f)), 0.5f); + EXPECT_EQ(static_cast(-float16(512.0f)), -512.0f); + EXPECT_EQ(static_cast(-float16(-512.0f)), 512.0f); } TEST(float16, comparison_cpu) { @@ -137,5 +141,10 @@ TEST(float16, lod_tensor_cpu) { } } +TEST(float16, print) { + float16 a = float16(1.0f); + std::cout << a << std::endl; +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index 567209df4edc483bcb5c6264c62034ddff50c413..577fc24ceb1d3c83cc0546dc5db9c8c7c1f01f86 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -36,19 +36,19 @@ limitations under the License. 
*/ half *in1, *in2, *out; \ half *d_in1, *d_in2, *d_out; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - cudaMalloc((void**)&d_out, size); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ - out = (half*)malloc(size); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + cudaMalloc(reinterpret_cast(&d_out), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(size)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \ - EXPECT_EQ(float(float16(out[0])), v_out); \ + EXPECT_EQ(static_cast(float16(out[0])), v_out); \ free(in1); \ free(in2); \ free(out); \ @@ -63,17 +63,17 @@ limitations under the License. */ half *in1, *in2; \ half *d_in1, *d_in2; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ op_type<<<1, 1>>>(d_in1, d_in2); \ cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \ - EXPECT_EQ(float(float16(in1[0])), v_out); \ + EXPECT_EQ(static_cast(float16(in1[0])), v_out); \ free(in1); \ free(in2); \ cudaFree(d_in1); \ @@ -87,12 +87,12 @@ limitations under the License. */ half *d_in1, *d_in2; \ bool *out, *d_out; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - cudaMalloc((void**)&d_out, 1); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ - out = (bool*)malloc(1); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + cudaMalloc(reinterpret_cast(&d_out), 1); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(1)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ @@ -130,13 +130,13 @@ void TestNeg(float v_in, float v_out) { LOG(INFO) << "Test Neg on GPU!"; half *in, *d_in; int size = sizeof(half); - cudaMalloc((void**)&d_in, size); - in = (half*)malloc(size); + cudaMalloc(reinterpret_cast(&d_in), size); + in = reinterpret_cast(malloc(size)); in[0] = half(float16(v_in)); cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); Neg<<<1, 1>>>(d_in); cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost); - EXPECT_EQ(float(float16(in[0])), v_out); + EXPECT_EQ(static_cast(float16(in[0])), v_out); free(in); cudaFree(d_in); } diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc index dd70ff9ff574b32bc96a9e8255b1bf77a5cc84e4..aaebeb1353a13ab16fcf98f10da59d41fd2f5b48 100644 --- a/paddle/fluid/platform/gpu_info.cc +++ b/paddle/fluid/platform/gpu_info.cc @@ -14,8 +14,9 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/gpu_info.h" -#include "gflags/gflags.h" +#include +#include "gflags/gflags.h" #include "paddle/fluid/platform/enforce.h" DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, @@ -77,8 +78,8 @@ void SetDeviceId(int id) { "cudaSetDevice failed in paddle::platform::SetDeviceId"); } -void GpuMemoryUsage(size_t &available, size_t &total) { - PADDLE_ENFORCE(cudaMemGetInfo(&available, &total), +void GpuMemoryUsage(size_t *available, size_t *total) { + PADDLE_ENFORCE(cudaMemGetInfo(available, total), "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage"); } @@ -86,7 +87,7 @@ size_t GpuMaxAllocSize() { size_t total = 0; size_t available = 0; - GpuMemoryUsage(available, total); + GpuMemoryUsage(&available, &total); // Reserve the rest for page tables, etc. return static_cast(total * FLAGS_fraction_of_gpu_memory_to_use); @@ -101,7 +102,7 @@ size_t GpuMaxChunkSize() { size_t total = 0; size_t available = 0; - GpuMemoryUsage(available, total); + GpuMemoryUsage(&available, &total); VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/" << total / 1024 / 1024 << "M"; size_t reserving = static_cast(0.05 * total); diff --git a/paddle/fluid/platform/gpu_info.h b/paddle/fluid/platform/gpu_info.h index fa469fa77f5ca780da153cc87da8d04f239711f3..36345e17406e22970806fa274d5a73a703517c43 100644 --- a/paddle/fluid/platform/gpu_info.h +++ b/paddle/fluid/platform/gpu_info.h @@ -23,10 +23,6 @@ limitations under the License. */ namespace paddle { namespace platform { -//! Environment variable: fraction of GPU memory to use on each device. -const std::string kEnvFractionGpuMemoryToUse = - "PADDLE_FRACTION_GPU_MEMORY_TO_USE"; - //! Get the total number of GPU devices in system. int GetCUDADeviceCount(); @@ -46,7 +42,7 @@ int GetCurrentDeviceId(); void SetDeviceId(int device_id); //! Get the memory usage of current GPU device. -void GpuMemoryUsage(size_t &available, size_t &total); +void GpuMemoryUsage(size_t *available, size_t *total); //! Get the maximum allocation size of current GPU device. size_t GpuMaxAllocSize(); diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h index 90b78142b845e7e12c0c7dfb391f6aa3bd848436..de8056237fb022f62488e0fedf9a4f67e4601072 100644 --- a/paddle/fluid/platform/mkldnn_helper.h +++ b/paddle/fluid/platform/mkldnn_helper.h @@ -11,11 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once -#include +#include +#include "mkldnn/include/mkldnn.hpp" #include "paddle/fluid/framework/operator.h" namespace paddle { diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..ca9ab2c7aecff47924f0198802d710b7661f5576 --- /dev/null +++ b/paddle/fluid/platform/nccl_helper.h @@ -0,0 +1,143 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
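// --- Editor's example (sketch, not part of the patch) ----------------------
// GpuMemoryUsage now reports through pointer out-parameters instead of
// non-const references, so a call site looks like this:
void GpuMemoryUsageSketch() {
  size_t available = 0;
  size_t total = 0;
  paddle::platform::GpuMemoryUsage(&available, &total);
  size_t used_bytes = total - available;  // bytes currently in use on the GPU
  (void)used_bytes;
}
// ----------------------------------------------------------------------------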
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT +#include +#include +#include "paddle/fluid/platform/dynload/nccl.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace platform { + +inline ncclDataType_t ToNCCLDataType(std::type_index type) { + if (type == typeid(float)) { // NOLINT + return ncclFloat; + } else if (type == typeid(double)) { // NOLINT + return ncclDouble; + } else if (type == typeid(int)) { // NOLINT + return ncclInt; + } else if (type == typeid(int64_t)) { // NOLINT + return ncclInt64; + } else { + PADDLE_THROW("Not supported"); + } +} + +class NCCLGroupGuard { + public: + inline NCCLGroupGuard() { + mutex().lock(); + PADDLE_ENFORCE(dynload::ncclGroupStart()); + } + + inline ~NCCLGroupGuard() { + PADDLE_ENFORCE(dynload::ncclGroupEnd()); + mutex().unlock(); + } + + private: + static std::mutex &mutex() { + static std::mutex mtx; + return mtx; + } +}; + +struct NCCLContext { + std::unique_ptr ctx_; + ncclComm_t comm_; + + explicit NCCLContext(int dev_id) + : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))), comm_{nullptr} {} + + cudaStream_t stream() const { return ctx_->stream(); } + + int device_id() const { + return boost::get(ctx_->GetPlace()).device; + } + + static void InitNCCLContext(std::unordered_map *contexts, + const std::vector &places) { + std::vector comms; + std::vector devs; + comms.resize(contexts->size()); + devs.reserve(contexts->size()); + + for (auto &p : places) { + devs.push_back(boost::get(p).device); + } + + PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( + &comms[0], static_cast(contexts->size()), &devs[0])); + + int i = 0; + for (auto &dev_id : devs) { + contexts->at(dev_id).comm_ = comms[i++]; + } + } +}; + +struct NCCLContextMap { + std::unordered_map contexts_; + std::vector order_; + + explicit NCCLContextMap(const std::vector &places) { + PADDLE_ENFORCE(!places.empty()); + order_.reserve(places.size()); + for (auto &p : places) { + int dev_id = boost::get(p).device; + order_.emplace_back(dev_id); + contexts_.emplace(dev_id, NCCLContext(dev_id)); + } + PADDLE_ENFORCE_EQ( + order_.size(), contexts_.size(), + "NCCL Context Map does not support contain two or more same device"); + + if (places.size() > 1) { + std::vector comms; + comms.resize(order_.size()); + + PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( + &comms[0], static_cast(order_.size()), &order_[0])); + + int i = 0; + for (auto &dev_id : order_) { + contexts_.at(dev_id).comm_ = comms[i++]; + } + } + } + + CUDADeviceContext *DevCtx(int dev_id) const { return at(dev_id).ctx_.get(); } + + CUDADeviceContext *DevCtx(platform::Place p) const { + return DevCtx(boost::get(p).device); + } + + const NCCLContext &at(platform::Place p) const { + return this->at(boost::get(p).device); + } + + const NCCLContext &at(int dev_id) const { return contexts_.at(dev_id); } + + void WaitAll() { + for (auto &p : contexts_) { + p.second.ctx_->Wait(); + } + } +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc index de8f958eb012cb1ac563cbbbac8951e439bf8f33..655ce8485d4584aa0955315b045da6bf541f7fe2 100644 --- a/paddle/fluid/platform/place.cc +++ b/paddle/fluid/platform/place.cc @@ -26,6 +26,7 @@ class PlacePrinter : public boost::static_visitor<> { void operator()(const CUDAPlace &p) { os_ << "CUDAPlace(" << p.device << ")"; } + void operator()(const CUDAPinnedPlace &p) { os_ << "CUDAPinnedPlace"; } private: 
std::ostream &os_; @@ -40,12 +41,19 @@ const Place &get_place() { return the_default_place; } const CUDAPlace default_gpu() { return CUDAPlace(0); } const CPUPlace default_cpu() { return CPUPlace(); } +const CUDAPinnedPlace default_cuda_pinned() { return CUDAPinnedPlace(); } bool is_gpu_place(const Place &p) { return boost::apply_visitor(IsCUDAPlace(), p); } -bool is_cpu_place(const Place &p) { return !is_gpu_place(p); } +bool is_cpu_place(const Place &p) { + return boost::apply_visitor(IsCPUPlace(), p); +} + +bool is_cuda_pinned_place(const Place &p) { + return boost::apply_visitor(IsCUDAPinnedPlace(), p); +} bool places_are_same_class(const Place &p1, const Place &p2) { return p1.which() == p2.which(); @@ -53,7 +61,7 @@ bool places_are_same_class(const Place &p1, const Place &p2) { bool is_same_place(const Place &p1, const Place &p2) { if (places_are_same_class(p1, p2)) { - if (is_cpu_place(p1)) { + if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) { return true; } else { return boost::get(p1) == boost::get(p2); diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index 4cc8b377b8b671eb5a446ecbae21ba9628fbd2c8..ad54a878996bd36f2d714f6554b44c89dae3fd0c 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -11,10 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include +#include + #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/variant.h" @@ -45,12 +46,33 @@ struct CUDAPlace { int device; }; +struct CUDAPinnedPlace { + CUDAPinnedPlace() {} + + // needed for variant equality comparison + inline bool operator==(const CUDAPinnedPlace &) const { return true; } + inline bool operator!=(const CUDAPinnedPlace &) const { return false; } +}; + struct IsCUDAPlace : public boost::static_visitor { bool operator()(const CPUPlace &) const { return false; } bool operator()(const CUDAPlace &gpu) const { return true; } + bool operator()(const CUDAPinnedPlace &) const { return false; } }; -typedef boost::variant Place; +struct IsCPUPlace : public boost::static_visitor { + bool operator()(const CPUPlace &cpu) const { return true; } + bool operator()(const CUDAPlace &) const { return false; } + bool operator()(const CUDAPinnedPlace &) const { return false; } +}; + +struct IsCUDAPinnedPlace : public boost::static_visitor { + bool operator()(const CPUPlace &) const { return false; } + bool operator()(const CUDAPlace &) const { return false; } + bool operator()(const CUDAPinnedPlace &cuda_pinned) const { return true; } +}; + +typedef boost::variant Place; using PlaceList = std::vector; @@ -59,9 +81,11 @@ const Place &get_place(); const CUDAPlace default_gpu(); const CPUPlace default_cpu(); +const CUDAPinnedPlace default_cuda_pinned(); bool is_gpu_place(const Place &); bool is_cpu_place(const Place &); +bool is_cuda_pinned_place(const Place &); bool places_are_same_class(const Place &, const Place &); bool is_same_place(const Place &, const Place &); @@ -95,6 +119,16 @@ struct PlaceVisitorWrapper #else PADDLE_THROW("Paddle is not compiled with CUDA. 
Cannot visit cuda device"); return typename Visitor::result_type(); +#endif + } + + typename Visitor::result_type operator()( + const CUDAPinnedPlace &cuda_pinned) const { +#ifdef PADDLE_WITH_CUDA + return visitor_(cuda_pinned); +#else + PADDLE_THROW("Paddle is not compiled with CUDA. Cannot visit cuda_pinned"); + return typename Visitor::result_type(); #endif } }; diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index b25206ff35cc87dcdd363bc0de54530f629d73ed..412cdda286c3a77af002fdc5eb6a5ae440606b82 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -15,8 +15,11 @@ limitations under the License. */ #include "paddle/fluid/platform/profiler.h" #include #include +#include #include #include +#include // NOLINT +#include #ifdef PADDLE_WITH_CUDA #include #endif // PADDLE_WITH_CUDA @@ -28,10 +31,10 @@ limitations under the License. */ namespace paddle { namespace platform { +struct EventList; + // The profiler state, the initial value is ProfilerState::kDisabled static ProfilerState g_state = ProfilerState::kDisabled; -// To record which timer the profiler used, CUDA or CPU. -static std::string g_profiler_place = ""; // The thread local event list only can be accessed by the specific thread // The thread index of each thread static thread_local int32_t g_thread_id; @@ -45,6 +48,39 @@ static std::list> g_all_event_lists; // The thread local event list only can be accessed by the specific thread static thread_local std::shared_ptr g_event_list; +struct EventList { + constexpr static size_t kMB = 1024 * 1024; + constexpr static size_t kEventBlockSize = 16 * kMB; + constexpr static size_t kEventSize = sizeof(Event); + constexpr static size_t kEventAlign = alignof(Event); + constexpr static size_t kNumBlock = + kEventBlockSize / + ((kEventSize + kEventAlign - 1) / kEventAlign * kEventAlign); + + template + void Record(Args&&... args) { + if (event_blocks.empty() || event_blocks.front().size() == kNumBlock) { + event_blocks.emplace_front(); + event_blocks.front().reserve(kNumBlock); + } + event_blocks.front().emplace_back(std::forward(args)...); + } + + std::vector Reduce() { + std::vector result; + for (auto& block : event_blocks) { + result.insert(result.begin(), std::make_move_iterator(block.begin()), + std::make_move_iterator(block.end())); + } + event_blocks.clear(); + return result; + } + + void Clear() { event_blocks.clear(); } + + std::forward_list> event_blocks; +}; + inline uint64_t GetTimeInNsec() { using clock = std::conditional(tv.tv_sec) * 1000000 + tv.tv_usec); } -Event::Event(EventKind kind, std::string name, uint32_t thread_id, +Event::Event(EventType type, std::string name, uint32_t thread_id, const DeviceContext* dev_ctx) - : kind_(kind), name_(name), thread_id_(thread_id), has_cuda_(false) { + : type_(type), name_(name), thread_id_(thread_id), has_cuda_(false) { #ifdef PADDLE_WITH_CUDA has_cuda_ = dev_ctx ? 
platform::is_gpu_place(dev_ctx->GetPlace()) : false; if (has_cuda_) { @@ -76,17 +112,7 @@ Event::Event(EventKind kind, std::string name, uint32_t thread_id, cpu_ns_ = GetTimeInNsec(); } -std::string Event::kind() const { - switch (kind_) { - case EventKind::kMark: - return "mark"; - case EventKind::kPushRange: - return "push"; - case EventKind::kPopRange: - return "pop"; - } - PADDLE_THROW("Unknown EventKind."); -} +const EventType& Event::type() const { return type_; } double Event::CpuElapsedMs(const Event& e) const { return (e.cpu_ns_ - cpu_ns_) / (1000000.0); @@ -129,15 +155,15 @@ inline EventList& GetEventList() { } void Mark(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventKind::kMark, name, g_thread_id, dev_ctx); + GetEventList().Record(EventType::kMark, name, g_thread_id, dev_ctx); } void PushEvent(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventKind::kPushRange, name, g_thread_id, dev_ctx); + GetEventList().Record(EventType::kPushRange, name, g_thread_id, dev_ctx); } void PopEvent(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventKind::kPopRange, name, g_thread_id, dev_ctx); + GetEventList().Record(EventType::kPopRange, name, g_thread_id, dev_ctx); } RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx) @@ -197,12 +223,7 @@ void EnableProfiler(ProfilerState state) { "The profiling state should be disabled when calling ", "EnableProfiler."); g_state = state; - if (g_state == ProfilerState::kCUDA) { - g_profiler_place = "CUDA"; - } else if (g_state == ProfilerState::kCPU) { - g_profiler_place = "CPU"; - } else { - g_profiler_place = "All"; + if (g_state == ProfilerState::kAll) { GetDeviceTracer()->Enable(); } #ifdef PADDLE_WITH_CUDA @@ -240,27 +261,63 @@ std::vector> GetAllEvents() { return result; } -void DisableProfiler(EventSortingKey sorted_key, - const std::string& profile_path) { - PADDLE_ENFORCE(g_state != ProfilerState::kDisabled, - "Can't disable profiling, since it's not starting."); - // Mark the profiling stop. 
- Mark("_stop_profiler_", nullptr); - g_state = ProfilerState::kDisabled; +// The information of each event given in the profiling report +struct EventItem { + std::string name; + int calls; + double total_time; + double min_time; + double max_time; + double ave_time; +}; + +// Print results +void PrintProfiler(const std::vector>& events_table, + const std::string& sorted_domain, const size_t name_width, + const size_t data_width) { + // Output header information + std::cout << "\n------------------------->" + << " Profiling Report " + << "<-------------------------\n\n"; + std::string place; + if (g_state == ProfilerState::kCPU) { + place = "CPU"; + } else if (g_state == ProfilerState::kCUDA) { + place = "CUDA"; + } else if (g_state == ProfilerState::kAll) { + place = "All"; + } else { + PADDLE_THROW("Invalid profiler state"); + } - std::vector> all_events = GetAllEvents(); - ParseEvents(all_events, sorted_key); - ResetProfiler(); - DeviceTracer* tracer = GetDeviceTracer(); - if (g_profiler_place == "All" && tracer && tracer->IsEnabled()) { - tracer->Disable(); - tracer->GenProfile(profile_path); + std::cout << "Place: " << place << std::endl; + std::cout << "Time unit: ms" << std::endl; + std::cout << "Sorted by " << sorted_domain + << " in descending order in the same thread\n\n"; + // Output events table + std::cout.setf(std::ios::left); + std::cout << std::setw(name_width) << "Event" << std::setw(data_width) + << "Calls" << std::setw(data_width) << "Total" + << std::setw(data_width) << "Min." << std::setw(data_width) + << "Max." << std::setw(data_width) << "Ave." << std::endl; + for (size_t i = 0; i < events_table.size(); ++i) { + for (size_t j = 0; j < events_table[i].size(); ++j) { + const EventItem& event_item = events_table[i][j]; + std::cout << std::setw(name_width) << event_item.name + << std::setw(data_width) << event_item.calls + << std::setw(data_width) << event_item.total_time + << std::setw(data_width) << event_item.min_time + << std::setw(data_width) << event_item.max_time + << std::setw(data_width) << event_item.ave_time << std::endl; + } } + std::cout << std::endl; } -void ParseEvents(std::vector>& events, - EventSortingKey sorted_by) { - if (g_profiler_place == "") return; +// Parse the event list and output the profiling report +void ParseEvents(const std::vector>& events, + EventSortingKey sorted_by = EventSortingKey::kDefault) { + if (g_state == ProfilerState::kDisabled) return; std::string sorted_domain; std::function sorted_func; @@ -307,9 +364,9 @@ void ParseEvents(std::vector>& events, std::unordered_map event_idx; for (size_t j = 0; j < events[i].size(); j++) { - if (events[i][j].kind() == "push") { + if (events[i][j].type() == EventType::kPushRange) { pushed_events.push_back(events[i][j]); - } else if (events[i][j].kind() == "pop") { + } else if (events[i][j].type() == EventType::kPopRange) { std::list::reverse_iterator rit = pushed_events.rbegin(); while (rit != pushed_events.rend() && rit->name() != events[i][j].name()) { @@ -317,10 +374,10 @@ void ParseEvents(std::vector>& events, } if (rit != pushed_events.rend()) { - double event_time = - (g_profiler_place == "CUDA" || g_profiler_place == "All") - ? rit->CudaElapsedMs(events[i][j]) - : rit->CpuElapsedMs(events[i][j]); + double event_time = (g_state == ProfilerState::kCUDA || + g_state == ProfilerState::kAll) + ? 
rit->CudaElapsedMs(events[i][j]) + : rit->CpuElapsedMs(events[i][j]); std::string event_name = "thread" + std::to_string(rit->thread_id()) + "::" + rit->name(); @@ -376,35 +433,22 @@ void ParseEvents(std::vector>& events, PrintProfiler(events_table, sorted_domain, max_name_width + 4, 12); } -void PrintProfiler(std::vector>& events_table, - std::string& sorted_domain, const size_t name_width, - const size_t data_width) { - // Output header information - std::cout << "\n------------------------->" - << " Profiling Report " - << "<-------------------------\n\n"; - std::cout << "Place: " << g_profiler_place << std::endl; - std::cout << "Time unit: ms" << std::endl; - std::cout << "Sorted by " << sorted_domain - << " in descending order in the same thread\n\n"; - // Output events table - std::cout.setf(std::ios::left); - std::cout << std::setw(name_width) << "Event" << std::setw(data_width) - << "Calls" << std::setw(data_width) << "Total" - << std::setw(data_width) << "Min." << std::setw(data_width) - << "Max." << std::setw(data_width) << "Ave." << std::endl; - for (size_t i = 0; i < events_table.size(); ++i) { - for (size_t j = 0; j < events_table[i].size(); ++j) { - EventItem& event_item = events_table[i][j]; - std::cout << std::setw(name_width) << event_item.name - << std::setw(data_width) << event_item.calls - << std::setw(data_width) << event_item.total_time - << std::setw(data_width) << event_item.min_time - << std::setw(data_width) << event_item.max_time - << std::setw(data_width) << event_item.ave_time << std::endl; - } +void DisableProfiler(EventSortingKey sorted_key, + const std::string& profile_path) { + PADDLE_ENFORCE(g_state != ProfilerState::kDisabled, + "Can't disable profiling, since it's not starting."); + // Mark the profiling stop. + Mark("_stop_profiler_", nullptr); + + std::vector> all_events = GetAllEvents(); + ParseEvents(all_events, sorted_key); + ResetProfiler(); + DeviceTracer* tracer = GetDeviceTracer(); + if (g_state == ProfilerState::kAll && tracer && tracer->IsEnabled()) { + tracer->Disable(); + tracer->GenProfile(profile_path); } - std::cout << std::endl; + g_state = ProfilerState::kDisabled; } } // namespace platform diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index de9a5cc20d76bf84778e0933831f218abb66c465..b07427c8f6903e0100ca9a478881444d86501bcc 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include +#include #include #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/profiler.pb.h" @@ -23,16 +23,16 @@ limitations under the License. */ namespace paddle { namespace platform { -enum EventKind { kMark, kPushRange, kPopRange }; +enum EventType { kMark, kPushRange, kPopRange }; class Event { public: // The DeviceContext is used to get the cuda stream. // If CPU profiling mode, can pass nullptr. 
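// --- Editor's example (sketch, not part of the patch) ----------------------
// The public profiler API is unchanged by the refactor above; only EventKind
// becomes EventType, and the report is keyed off g_state instead of the old
// g_profiler_place string. Typical usage, mirroring profiler_test.cc:
void ProfilerUsageSketch() {
  paddle::platform::EnableProfiler(paddle::platform::ProfilerState::kCPU);
  {
    // nullptr DeviceContext => CPU timing only.
    paddle::platform::RecordEvent record("my_op", nullptr);
    // ... work to be timed ...
  }
  paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kTotal,
                                    "/tmp/profile");
}
// ----------------------------------------------------------------------------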
- Event(EventKind kind, std::string name, uint32_t thread_id, + Event(EventType type, std::string name, uint32_t thread_id, const DeviceContext* dev_ctx); - std::string kind() const; + const EventType& type() const; std::string name() const { return name_; } uint32_t thread_id() const { return thread_id_; } bool has_cuda() const { return has_cuda_; } @@ -46,7 +46,7 @@ class Event { double CudaElapsedMs(const Event& e) const; private: - EventKind kind_; + EventType type_; std::string name_; uint32_t thread_id_; int64_t cpu_ns_; @@ -57,39 +57,6 @@ class Event { #endif }; -struct EventList { - constexpr static size_t kMB = 1024 * 1024; - constexpr static size_t kEventBlockSize = 16 * kMB; - constexpr static size_t kEventSize = sizeof(Event); - constexpr static size_t kEventAlign = alignof(Event); - constexpr static size_t kNumBlock = - kEventBlockSize / - ((kEventSize + kEventAlign - 1) / kEventAlign * kEventAlign); - - template - void Record(Args&&... args) { - if (event_blocks.empty() || event_blocks.front().size() == kNumBlock) { - event_blocks.emplace_front(); - event_blocks.front().reserve(kNumBlock); - } - event_blocks.front().emplace_back(std::forward(args)...); - } - - std::vector Reduce() { - std::vector result; - for (auto& block : event_blocks) { - result.insert(result.begin(), std::make_move_iterator(block.begin()), - std::make_move_iterator(block.end())); - } - event_blocks.clear(); - return result; - } - - void Clear() { event_blocks.clear(); } - - std::forward_list> event_blocks; -}; - enum ProfilerState { kDisabled, // disabled state kCPU, // CPU profiling state @@ -136,16 +103,6 @@ struct RecordThread { // event_lists, event_lists[i][j] represents the j-th Event of i-th thread. std::vector> GetAllEvents(); -// The information of each event given in the profiling report -struct EventItem { - std::string name; - int calls; - double total_time; - double min_time; - double max_time; - double ave_time; -}; - // Candidate keys to sort the profiling report enum EventSortingKey { kDefault, kCalls, kTotal, kMin, kMax, kAve }; @@ -158,14 +115,5 @@ void ResetProfiler(); void DisableProfiler(EventSortingKey sorted_key, const std::string& profile_path); -// Parse the event list and output the profiling report -void ParseEvents(std::vector>&, - EventSortingKey sorted_by = EventSortingKey::kDefault); - -// Print results -void PrintProfiler(std::vector>& events_table, - std::string& sorted_domain, const size_t name_width, - const size_t data_width); - } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc index fc77e0f3213da776e0b05ad5b5da9081665cdf6e..61f467814ba4a24c8b73f1bc614cda0ab8c4debd 100644 --- a/paddle/fluid/platform/profiler_test.cc +++ b/paddle/fluid/platform/profiler_test.cc @@ -13,19 +13,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/platform/profiler.h" +#include +#ifdef PADDLE_WITH_CUDA +#include +#endif #include "gtest/gtest.h" TEST(Event, CpuElapsedTime) { using paddle::platform::Event; - using paddle::platform::EventKind; + using paddle::platform::EventType; - Event start_event(EventKind::kPushRange, "test", 0, nullptr); + Event start_event(EventType::kPushRange, "test", 0, nullptr); EXPECT_TRUE(start_event.has_cuda() == false); int counter = 0; while (counter != 1000) { counter++; } - Event stop_event(EventKind::kPopRange, "test", 0, nullptr); + Event stop_event(EventType::kPopRange, "test", 0, nullptr); EXPECT_GT(start_event.CpuElapsedMs(stop_event), 0); } @@ -35,16 +39,16 @@ TEST(Event, CudaElapsedTime) { using paddle::platform::CUDADeviceContext; using paddle::platform::CUDAPlace; using paddle::platform::Event; - using paddle::platform::EventKind; + using paddle::platform::EventType; DeviceContext* dev_ctx = new CUDADeviceContext(CUDAPlace(0)); - Event start_event(EventKind::kPushRange, "test", 0, dev_ctx); + Event start_event(EventType::kPushRange, "test", 0, dev_ctx); EXPECT_TRUE(start_event.has_cuda() == true); int counter = 0; while (counter != 1000) { counter++; } - Event stop_event(EventKind::kPopRange, "test", 0, dev_ctx); + Event stop_event(EventType::kPopRange, "test", 0, dev_ctx); EXPECT_GT(start_event.CudaElapsedMs(stop_event), 0); } #endif @@ -52,7 +56,7 @@ TEST(Event, CudaElapsedTime) { TEST(RecordEvent, RecordEvent) { using paddle::platform::DeviceContext; using paddle::platform::Event; - using paddle::platform::EventKind; + using paddle::platform::EventType; using paddle::platform::RecordEvent; using paddle::platform::ProfilerState; using paddle::platform::EventSortingKey; @@ -157,3 +161,13 @@ TEST(RecordEvent, RecordEvent) { // Will remove parsing-related code from test later DisableProfiler(EventSortingKey::kTotal, "/tmp/profiler"); } + +#ifdef PADDLE_WITH_CUDA +TEST(TMP, stream_wait) { + cudaStream_t stream; + cudaStreamCreate(&stream); + cudaStreamSynchronize(stream); + cudaStreamSynchronize(stream); + cudaStreamSynchronize(stream); +} +#endif diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h index 917c48b47f8d70cd821d45dfbc6bafa494710ffa..7877d3e41c1c993662f5d91b263cbcb71db74c36 100644 --- a/paddle/fluid/platform/transform.h +++ b/paddle/fluid/platform/transform.h @@ -14,29 +14,44 @@ limitations under the License. */ #pragma once +#include +#include + #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/place.h" -#include -#include #ifdef __NVCC__ #include #include -#include "paddle/fluid/platform/details/device_ptr_cast.h" +#include "paddle/fluid/platform/details/cuda_transform_iterator_cast.h" #endif namespace paddle { namespace platform { -// Transform on host or device. It provides the same API in std library. +// Transform applys a unary or a binary functor on each element in a +// range defined by a pair of iterators. +// +// - The specialization for CPU calls std::transform. +// - The specialization for CUDA calls thrust::tranform. +// +// NOTE: We need to define InputIter and OutputIter defined as +// different types, because the InputIter points op's inputs and +// OutputIter pints to op's outputs. +// +// NOTE: We don't assume that InputIter to be const InputType* and +// OutputIter to be OutputType*, because we might use a iterator +// class, paddle::fluid::operators::RowwiseTRansformIterator. 
template struct Transform { + // The unary version. template void operator()(const DeviceContext& context, InputIter first, InputIter last, OutputIter result, UnaryOperation op); + // The binary version. template void operator()(const DeviceContext& context, InputIter1 first1, @@ -70,8 +85,9 @@ struct Transform { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); thrust::transform(thrust::cuda::par.on(context.stream()), - details::DevPtrCast(first), details::DevPtrCast(last), - details::DevPtrCast(result), op); + details::CastToCUDATransformIterator(first), + details::CastToCUDATransformIterator(last), + details::CastToCUDATransformIterator(result), op); } template { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); thrust::transform(thrust::cuda::par.on(context.stream()), - details::DevPtrCast(first1), details::DevPtrCast(last1), - details::DevPtrCast(first2), details::DevPtrCast(result), - op); + details::CastToCUDATransformIterator(first1), + details::CastToCUDATransformIterator(last1), + details::CastToCUDATransformIterator(first2), + details::CastToCUDATransformIterator(result), op); } }; #endif diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu index 7b5cfd8f43473dc6bc784e98bd26fdd9e0ba9994..f65d1f60100edc85ba9745ed36f26a0ed160d80f 100644 --- a/paddle/fluid/platform/transform_test.cu +++ b/paddle/fluid/platform/transform_test.cu @@ -18,11 +18,12 @@ limitations under the License. */ #include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/transform.h" +namespace { + template class Scale { public: explicit Scale(const T& scale) : scale_(scale) {} - HOSTDEVICE T operator()(const T& a) const { return a * scale_; } private: @@ -35,11 +36,23 @@ class Multiply { HOSTDEVICE T operator()(const T& a, const T& b) const { return a * b; } }; +} // namespace + +using paddle::memory::Alloc; +using paddle::memory::Free; +using paddle::memory::Copy; + +using paddle::platform::CPUPlace; +using paddle::platform::CUDAPlace; +using paddle::platform::CPUDeviceContext; +using paddle::platform::CUDADeviceContext; + +using paddle::platform::Transform; + TEST(Transform, CPUUnary) { - using namespace paddle::platform; CPUDeviceContext ctx; float buf[4] = {0.1, 0.2, 0.3, 0.4}; - Transform trans; + Transform trans; trans(ctx, buf, buf + 4, buf, Scale(10)); for (int i = 0; i < 4; ++i) { ASSERT_NEAR(buf[i], static_cast(i + 1), 1e-5); @@ -47,14 +60,12 @@ TEST(Transform, CPUUnary) { } TEST(Transform, GPUUnary) { - using namespace paddle::platform; - using namespace paddle::memory; CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4}; float* gpu_buf = static_cast(Alloc(gpu0, sizeof(float) * 4)); Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf), ctx.stream()); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale(10)); ctx.Wait(); Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf), ctx.stream()); @@ -65,10 +76,8 @@ TEST(Transform, GPUUnary) { } TEST(Transform, CPUBinary) { - using namespace paddle::platform; - using namespace paddle::memory; int buf[4] = {1, 2, 3, 4}; - Transform trans; + Transform trans; CPUDeviceContext ctx; trans(ctx, buf, buf + 4, buf, buf, Multiply()); for (int i = 0; i < 4; ++i) { @@ -77,14 +86,12 @@ TEST(Transform, CPUBinary) { } TEST(Transform, GPUBinary) { - using namespace paddle::platform; - using namespace paddle::memory; int buf[4] = {1, 2, 3, 4}; 
CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); int* gpu_buf = static_cast(Alloc(gpu0, sizeof(buf))); Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream()); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply()); ctx.Wait(); Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream()); diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h index 05ca33137de8db5291c8e38fc03457d05092cea8..45f60fc9d76560b133fa06198a24c7eaccc24088 100644 --- a/paddle/fluid/platform/variant.h +++ b/paddle/fluid/platform/variant.h @@ -14,29 +14,25 @@ limitations under the License. */ #pragma once -#ifdef __CUDACC__ -#ifdef __CUDACC_VER_MAJOR__ -// CUDA 9 define `__CUDACC_VER__` as a warning message, manually define -// __CUDACC_VER__ instead. +// Boost 1.41.0 requires __CUDACC_VER__, but in CUDA 9 __CUDACC_VER__ +// is removed, so we have to manually define __CUDACC_VER__ instead. +// For details, please refer to +// https://github.com/PaddlePaddle/Paddle/issues/6626 +#if defined(__CUDACC__) && defined(__CUDACC_VER_MAJOR__) #undef __CUDACC_VER__ - -#define __CUDACC_VER__ \ - (__CUDACC_VER_MAJOR__ * 10000 + __CUDACC_VER_MINOR__ * 100 + \ - __CUDACC_VER_BUILD__) -#endif - +#define __CUDACC_VER__ \ + __CUDACC_VER_BUILD__ + __CUDACC_VER_MAJOR__ * 10000 + \ + __CUDACC_VER_MINOR__ * 100 #endif -#include +#include "boost/config.hpp" -#ifdef PADDLE_WITH_CUDA - -// Because boost's variadic templates has bug on nvcc, boost will disable -// variadic template support when GPU enabled on nvcc. -// Define BOOST_NO_CXX11_VARIADIC_TEMPLATES on gcc/clang to generate same -// function symbols. -// +// Because Boost 1.41.0's variadic templates has bug on nvcc, boost +// will disable variadic template support in NVCC mode. Define +// BOOST_NO_CXX11_VARIADIC_TEMPLATES on gcc/clang to generate same +// function symbols. For details, // https://github.com/PaddlePaddle/Paddle/issues/3386 +#ifdef PADDLE_WITH_CUDA #ifndef BOOST_NO_CXX11_VARIADIC_TEMPLATES #define BOOST_NO_CXX11_VARIADIC_TEMPLATES #endif diff --git a/paddle/fluid/pybind/.clang-format b/paddle/fluid/pybind/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/pybind/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... 
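// --- Editor's example (sketch, not part of the patch) ----------------------
// On CPU the Transform functor documented above is just std::transform; the
// CUDA specialization dispatches to thrust::transform on the context's
// stream. A minimal CPU sketch equivalent to the unary test, reusing the
// Scale functor from transform_test.cu:
#include <algorithm>

void TransformCpuSketch() {
  float buf[4] = {0.1f, 0.2f, 0.3f, 0.4f};
  paddle::platform::CPUDeviceContext cpu_ctx;
  paddle::platform::Transform<paddle::platform::CPUDeviceContext> trans;
  trans(cpu_ctx, buf, buf + 4, buf, Scale<float>(10));
  // ...which, for the CPU specialization, boils down to:
  std::transform(buf, buf + 4, buf, [](float x) { return x * 10.0f; });
}
// ----------------------------------------------------------------------------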
diff --git a/paddle/fluid/pybind/.gitignore b/paddle/fluid/pybind/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8f222791edb016df65be5db75831f5f83cf63726 --- /dev/null +++ b/paddle/fluid/pybind/.gitignore @@ -0,0 +1 @@ +pybind.h diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index fe991033dfc2a6ccc66b0ca5588fe8f808d1eb43..4fef351c2118e43697606c90a616cd870e78cd77 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -2,15 +2,19 @@ if(WITH_PYTHON) if(WITH_AMD_GPU) hip_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + parallel_executor ${GLOB_OP_LIB}) else() cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + parallel_executor ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) target_link_libraries(paddle_pybind rt) endif(NOT APPLE AND NOT ANDROID) endif(WITH_AMD_GPU) + + cc_test(tensor_py_test SRCS tensor_py_test.cc DEPS python) endif(WITH_PYTHON) diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 6657b25ed2443c1ac9cb0a09098968d3181fc6ba..3f28e616494ad1322708ad6403aaf50b22d724e6 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "const_value.h" +#include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/framework/operator.h" namespace paddle { namespace pybind { -void BindConstValue(pybind11::module& m) { - m.def("kEmptyVarName", [] { return framework::kEmptyVarName; }); - m.def("kTempVarName", [] { return framework::kTempVarName; }); - m.def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); - m.def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); +void BindConstValue(pybind11::module* m) { + m->def("kEmptyVarName", [] { return framework::kEmptyVarName; }); + m->def("kTempVarName", [] { return framework::kTempVarName; }); + m->def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); + m->def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); } } // namespace pybind diff --git a/paddle/fluid/pybind/const_value.h b/paddle/fluid/pybind/const_value.h index 79e71e039dea6585aaf8193f1417c6ab3fbf6f76..2fab3160d1d95af7f6a49c472c2e211c19e67cac 100644 --- a/paddle/fluid/pybind/const_value.h +++ b/paddle/fluid/pybind/const_value.h @@ -11,16 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #pragma once + #include + #include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" -namespace py = pybind11; - namespace paddle { namespace pybind { -extern void BindConstValue(pybind11::module& m); + +void BindConstValue(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc index 4bd3ecf728dedaf74a554f77b114065f2d515786..08a2f185e117718d07ba984f76dfe5bf8229c33c 100644 --- a/paddle/fluid/pybind/exception.cc +++ b/paddle/fluid/pybind/exception.cc @@ -17,8 +17,8 @@ limitations under the License. */ namespace paddle { namespace pybind { -void BindException(pybind11::module& m) { - static pybind11::exception exc(m, "EnforceNotMet"); +void BindException(pybind11::module* m) { + static pybind11::exception exc(*m, "EnforceNotMet"); pybind11::register_exception_translator([](std::exception_ptr p) { try { if (p) std::rethrow_exception(p); @@ -27,7 +27,8 @@ void BindException(pybind11::module& m) { } }); - m.def("__unittest_throw_exception__", [] { PADDLE_THROW("test exception"); }); + m->def("__unittest_throw_exception__", + [] { PADDLE_THROW("test exception"); }); } } // namespace pybind diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h index bc6b0c067978959d4cdafec51db9574927b34b21..5e054267361f2c62b3ad36581be0ad17ce0718de 100644 --- a/paddle/fluid/pybind/exception.h +++ b/paddle/fluid/pybind/exception.h @@ -11,14 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + #include + #include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" + namespace paddle { namespace pybind { -extern void BindException(pybind11::module& m); +void BindException(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 45a64f43846e79c27295e52c59dca6bdfaa120a3..93533e5c9d88a9113d4d3eacb01901a8c14b6324 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -11,11 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/pybind/protobuf.h" + #include #include -#include "paddle/fluid/framework/backward.h" +#include +#include + #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/program_desc.h" @@ -95,10 +97,11 @@ struct type_caster> namespace paddle { namespace pybind { -using namespace paddle::framework; // NOLINT +namespace pd = paddle::framework; template -static py::bytes SerializeMessage(T &self) { +static pybind11::bytes SerializeMessage( + T &self) { // NOLINT due to pybind11 convention. 
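The const_value, exception, and protobuf bindings above all switch from taking pybind11::module by non-const reference to taking it by pointer, consistent with the lint rule on mutable reference parameters that the NOLINT note above also points at. Below is a minimal sketch of that convention, not part of the diff; "demo" and BindConstants are hypothetical names used only for this example, and the module macro follows the same PYBIND11_PLUGIN style the PR itself uses.

// Illustrative sketch, not part of the diff: a binding helper that receives
// the pybind11 module by pointer rather than by non-const reference, so the
// mutation is explicit at the call site (&m).
#include <pybind11/pybind11.h>

namespace demo {

void BindConstants(pybind11::module* m) {
  m->def("answer", [] { return 42; });
}

}  // namespace demo

PYBIND11_PLUGIN(demo) {
  pybind11::module m("demo");
  demo::BindConstants(&m);
  return m.ptr();
}
// End of illustrative sketch.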
// Check IsInitialized in Python std::string retv; PADDLE_ENFORCE(self.Proto()->SerializePartialToString(&retv), @@ -107,203 +110,198 @@ static py::bytes SerializeMessage(T &self) { } // Bind Methods -void BindProgramDesc(py::module &m) { - py::class_(m, "ProgramDesc", "") - .def(py::init<>()) +void BindProgramDesc(pybind11::module *m) { + pybind11::class_(*m, "ProgramDesc", "") + .def(pybind11::init<>()) .def("__init__", - [](ProgramDesc &self, const ProgramDesc &other) { - new (&self) ProgramDesc(other); + [](pd::ProgramDesc &self, const pd::ProgramDesc &other) { + new (&self) pd::ProgramDesc(other); }) .def("__init__", - [](ProgramDesc &self, const py::bytes &binary_str) { + [](pd::ProgramDesc &self, const pybind11::bytes &binary_str) { std::string str(binary_str); - new (&self) ProgramDesc(str); + new (&self) pd::ProgramDesc(str); }) - .def("append_block", &ProgramDesc::AppendBlock, - py::return_value_policy::reference) - .def("append_backward", - [](ProgramDesc &program_desc, const VarDesc &target, - const std::unordered_set &no_grad_vars) { - ParamGradInfoMap param_grad_map = - AppendBackward(program_desc, target, no_grad_vars); - std::unordered_map< - std::string, std::tuple> - retv; - for (auto it = param_grad_map.begin(); it != param_grad_map.end(); - ++it) { - const auto &grad_info = it->second; - retv[it->first] = std::make_tuple( - grad_info.name_, grad_info.block_idx_, grad_info.op_idx_); - } - return retv; - }) - .def("block", &ProgramDesc::MutableBlock, - py::return_value_policy::reference) - .def("num_blocks", &ProgramDesc::Size) - .def("serialize_to_string", SerializeMessage) + .def("append_block", &pd::ProgramDesc::AppendBlock, + pybind11::return_value_policy::reference) + .def("block", &pd::ProgramDesc::MutableBlock, + pybind11::return_value_policy::reference) + .def("num_blocks", &pd::ProgramDesc::Size) + .def("serialize_to_string", SerializeMessage) .def("parse_from_string", - [](ProgramDesc &program_desc, const std::string &data) { - proto::ProgramDesc *desc = program_desc.Proto(); + [](pd::ProgramDesc &program_desc, const std::string &data) { + pd::proto::ProgramDesc *desc = program_desc.Proto(); PADDLE_ENFORCE(desc->ParseFromString(data), "Fail to parse ProgramDesc from string. 
This could " "be a bug of Paddle."); }); } -void BindBlockDesc(py::module &m) { - py::class_(m, "BlockDesc", "") - .def_property_readonly("id", &BlockDesc::ID) - .def_property_readonly("parent", &BlockDesc::Parent) - .def("get_forward_block_idx", &BlockDesc::ForwardBlockID) - .def("set_forward_block_idx", &BlockDesc::SetForwardBlockID) - .def("append_op", &BlockDesc::AppendOp, - py::return_value_policy::reference) - .def("prepend_op", &BlockDesc::PrependOp, - py::return_value_policy::reference) - .def("insert_op", &BlockDesc::InsertOp, - py::return_value_policy::reference) - .def("remove_op", &BlockDesc::RemoveOp) +void BindBlockDesc(pybind11::module *m) { + pybind11::class_(*m, "BlockDesc", "") + .def_property_readonly("id", &pd::BlockDesc::ID) + .def_property_readonly("parent", &pd::BlockDesc::Parent) + .def("get_forward_block_idx", &pd::BlockDesc::ForwardBlockID) + .def("set_forward_block_idx", &pd::BlockDesc::SetForwardBlockID) + .def("append_op", &pd::BlockDesc::AppendOp, + pybind11::return_value_policy::reference) + .def("prepend_op", &pd::BlockDesc::PrependOp, + pybind11::return_value_policy::reference) + .def("insert_op", &pd::BlockDesc::InsertOp, + pybind11::return_value_policy::reference) + .def("remove_op", &pd::BlockDesc::RemoveOp) .def("var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.Var(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("has_var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.HasVar(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("rename_var", - [](BlockDesc &self, const py::bytes &byte_name, - const py::bytes &byte_name_new) { + [](pd::BlockDesc &self, const pybind11::bytes &byte_name, + const pybind11::bytes &byte_name_new) { std::string name = byte_name; std::string new_name = byte_name_new; self.RenameVar(name, new_name); }) .def("has_var_recursive", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.HasVarRecursive(name); }) .def("find_var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.FindVar(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("find_var_recursive", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.FindVarRecursive(name); }, - py::return_value_policy::reference) - .def("all_vars", &BlockDesc::AllVars, py::return_value_policy::reference) - .def("op_size", &BlockDesc::OpSize) - .def("op", &BlockDesc::Op, py::return_value_policy::reference) - .def("serialize_to_string", SerializeMessage); + pybind11::return_value_policy::reference) + .def("remove_var", + [](pd::BlockDesc &self, pybind11::bytes byte_name) { + std::string name = byte_name; + return self.RemoveVar(name); + }, + pybind11::return_value_policy::reference) + .def("all_vars", &pd::BlockDesc::AllVars, + pybind11::return_value_policy::reference) + .def("op_size", &pd::BlockDesc::OpSize) + .def("op", &pd::BlockDesc::Op, pybind11::return_value_policy::reference) + .def("serialize_to_string", SerializeMessage); } -void BindVarDsec(py::module &m) { - py::class_ var_desc(m, "VarDesc", 
""); +void BindVarDsec(pybind11::module *m) { + pybind11::class_ var_desc(*m, "VarDesc", ""); var_desc .def("name", - [](VarDesc &self) { - py::bytes name = self.Name(); + [](pd::VarDesc &self) { + pybind11::bytes name = self.Name(); return name; }, - py::return_value_policy::reference) - .def("set_name", &VarDesc::SetName) - .def("set_shape", &VarDesc::SetShape) - .def("set_shapes", &VarDesc::SetShapes) - .def("set_dtype", &VarDesc::SetDataType) - .def("set_dtypes", &VarDesc::SetDataTypes) - .def("set_capacity", &VarDesc::SetCapacity) - .def("shape", &VarDesc::GetShape, py::return_value_policy::reference) - .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference) - .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference) - .def("dtypes", &VarDesc::GetDataTypes, py::return_value_policy::reference) - .def("lod_level", &VarDesc::GetLoDLevel) - .def("lod_levels", &VarDesc::GetLoDLevels, - py::return_value_policy::reference) - .def("set_lod_level", &VarDesc::SetLoDLevel) - .def("set_lod_levels", &VarDesc::SetLoDLevels) - .def("type", &VarDesc::GetType) - .def("set_type", &VarDesc::SetType) - .def("serialize_to_string", SerializeMessage) - .def("persistable", &VarDesc::Persistable) - .def("set_persistable", &VarDesc::SetPersistable); + pybind11::return_value_policy::reference) + .def("set_name", &pd::VarDesc::SetName) + .def("set_shape", &pd::VarDesc::SetShape) + .def("set_shapes", &pd::VarDesc::SetShapes) + .def("set_dtype", &pd::VarDesc::SetDataType) + .def("set_dtypes", &pd::VarDesc::SetDataTypes) + .def("set_capacity", &pd::VarDesc::SetCapacity) + .def("shape", &pd::VarDesc::GetShape, + pybind11::return_value_policy::reference) + .def("shapes", &pd::VarDesc::GetShapes, + pybind11::return_value_policy::reference) + .def("dtype", &pd::VarDesc::GetDataType, + pybind11::return_value_policy::reference) + .def("dtypes", &pd::VarDesc::GetDataTypes, + pybind11::return_value_policy::reference) + .def("lod_level", &pd::VarDesc::GetLoDLevel) + .def("lod_levels", &pd::VarDesc::GetLoDLevels, + pybind11::return_value_policy::reference) + .def("set_lod_level", &pd::VarDesc::SetLoDLevel) + .def("set_lod_levels", &pd::VarDesc::SetLoDLevels) + .def("type", &pd::VarDesc::GetType) + .def("set_type", &pd::VarDesc::SetType) + .def("serialize_to_string", SerializeMessage) + .def("persistable", &pd::VarDesc::Persistable) + .def("set_persistable", &pd::VarDesc::SetPersistable); - py::enum_(var_desc, "VarType", "") - .value("BOOL", proto::VarType::BOOL) - .value("INT16", proto::VarType::INT16) - .value("INT32", proto::VarType::INT32) - .value("INT64", proto::VarType::INT64) - .value("FP16", proto::VarType::FP16) - .value("FP32", proto::VarType::FP32) - .value("FP64", proto::VarType::FP64) - .value("LOD_TENSOR", proto::VarType::LOD_TENSOR) - .value("SELECTED_ROWS", proto::VarType::SELECTED_ROWS) - .value("FEED_MINIBATCH", proto::VarType::FEED_MINIBATCH) - .value("FETCH_LIST", proto::VarType::FETCH_LIST) - .value("STEP_SCOPES", proto::VarType::STEP_SCOPES) - .value("LOD_RANK_TABLE", proto::VarType::LOD_RANK_TABLE) - .value("LOD_TENSOR_ARRAY", proto::VarType::LOD_TENSOR_ARRAY) - .value("CHANNEL", proto::VarType::CHANNEL) - .value("PLACE_LIST", proto::VarType::PLACE_LIST) - .value("READER", proto::VarType::READER) - .value("RAW", proto::VarType::RAW); + pybind11::enum_(var_desc, "VarType", "") + .value("BOOL", pd::proto::VarType::BOOL) + .value("INT16", pd::proto::VarType::INT16) + .value("INT32", pd::proto::VarType::INT32) + .value("INT64", pd::proto::VarType::INT64) + .value("FP16", 
pd::proto::VarType::FP16) + .value("FP32", pd::proto::VarType::FP32) + .value("FP64", pd::proto::VarType::FP64) + .value("LOD_TENSOR", pd::proto::VarType::LOD_TENSOR) + .value("SELECTED_ROWS", pd::proto::VarType::SELECTED_ROWS) + .value("FEED_MINIBATCH", pd::proto::VarType::FEED_MINIBATCH) + .value("FETCH_LIST", pd::proto::VarType::FETCH_LIST) + .value("STEP_SCOPES", pd::proto::VarType::STEP_SCOPES) + .value("LOD_RANK_TABLE", pd::proto::VarType::LOD_RANK_TABLE) + .value("LOD_TENSOR_ARRAY", pd::proto::VarType::LOD_TENSOR_ARRAY) + .value("CHANNEL", pd::proto::VarType::CHANNEL) + .value("PLACE_LIST", pd::proto::VarType::PLACE_LIST) + .value("READER", pd::proto::VarType::READER) + .value("RAW", pd::proto::VarType::RAW); } -void BindOpDesc(py::module &m) { - py::enum_(m, "AttrType", "") - .value("INT", proto::AttrType::INT) - .value("INTS", proto::AttrType::INTS) - .value("FLOAT", proto::AttrType::FLOAT) - .value("FLOATS", proto::AttrType::FLOATS) - .value("STRING", proto::AttrType::STRING) - .value("STRINGS", proto::AttrType::STRINGS) - .value("BOOL", proto::AttrType::BOOLEAN) - .value("BOOLS", proto::AttrType::BOOLEANS) - .value("BLOCK", proto::AttrType::BLOCK); +void BindOpDesc(pybind11::module *m) { + pybind11::enum_(*m, "AttrType", "") + .value("INT", pd::proto::AttrType::INT) + .value("INTS", pd::proto::AttrType::INTS) + .value("FLOAT", pd::proto::AttrType::FLOAT) + .value("FLOATS", pd::proto::AttrType::FLOATS) + .value("STRING", pd::proto::AttrType::STRING) + .value("STRINGS", pd::proto::AttrType::STRINGS) + .value("BOOL", pd::proto::AttrType::BOOLEAN) + .value("BOOLS", pd::proto::AttrType::BOOLEANS) + .value("BLOCK", pd::proto::AttrType::BLOCK); - py::class_ op_desc(m, "OpDesc", ""); + pybind11::class_ op_desc(*m, "OpDesc", ""); op_desc - .def("__init__", [](OpDesc &self) { new (&self) OpDesc(); }, - py::return_value_policy::reference) - .def("copy_from", &OpDesc::CopyFrom) - .def("type", &OpDesc::Type) - .def("set_type", &OpDesc::SetType) - .def("input", &OpDesc::Input) - .def("input_names", &OpDesc::InputNames) - .def("output", &OpDesc::Output) - .def("output_names", &OpDesc::OutputNames) - .def("set_input", &OpDesc::SetInput) - .def("set_output", &OpDesc::SetOutput) - .def("input_arg_names", &OpDesc::InputArgumentNames) - .def("output_arg_names", &OpDesc::OutputArgumentNames) - .def("rename_input", &OpDesc::RenameInput) - .def("rename_output", &OpDesc::RenameOutput) - .def("has_attr", &OpDesc::HasAttr) - .def("attr_type", &OpDesc::GetAttrType) - .def("attr_names", &OpDesc::AttrNames) - .def("set_attr", &OpDesc::SetAttr) - .def("attr", &OpDesc::GetAttr) - .def("set_block_attr", &OpDesc::SetBlockAttr) + .def("__init__", [](pd::OpDesc &self) { new (&self) pd::OpDesc(); }, + pybind11::return_value_policy::reference) + .def("copy_from", &pd::OpDesc::CopyFrom) + .def("type", &pd::OpDesc::Type) + .def("set_type", &pd::OpDesc::SetType) + .def("input", &pd::OpDesc::Input) + .def("input_names", &pd::OpDesc::InputNames) + .def("output", &pd::OpDesc::Output) + .def("output_names", &pd::OpDesc::OutputNames) + .def("set_input", &pd::OpDesc::SetInput) + .def("set_output", &pd::OpDesc::SetOutput) + .def("input_arg_names", &pd::OpDesc::InputArgumentNames) + .def("output_arg_names", &pd::OpDesc::OutputArgumentNames) + .def("rename_input", &pd::OpDesc::RenameInput) + .def("rename_output", &pd::OpDesc::RenameOutput) + .def("has_attr", &pd::OpDesc::HasAttr) + .def("attr_type", &pd::OpDesc::GetAttrType) + .def("attr_names", &pd::OpDesc::AttrNames) + .def("set_attr", &pd::OpDesc::SetAttr) + .def("attr", 
&pd::OpDesc::GetAttr) + .def("set_block_attr", &pd::OpDesc::SetBlockAttr) .def("set_serialized_attr", - [](OpDesc &self, const std::string &name, - const py::bytes &seriralized) { + [](pd::OpDesc &self, const std::string &name, + const pybind11::bytes &seriralized) { std::string ser(seriralized); self.SetAttr(name, ser); }) - .def("block_attr", &OpDesc::GetBlockAttr) - .def("check_attrs", &OpDesc::CheckAttrs) - .def("infer_shape", &OpDesc::InferShape) - .def("infer_var_type", &OpDesc::InferVarType) - .def("serialize_to_string", SerializeMessage) - .def("block", &OpDesc::Block, py::return_value_policy::reference); + .def("block_attr", &pd::OpDesc::GetBlockAttr) + .def("check_attrs", &pd::OpDesc::CheckAttrs) + .def("infer_shape", &pd::OpDesc::InferShape) + .def("infer_var_type", &pd::OpDesc::InferVarType) + .def("serialize_to_string", SerializeMessage) + .def("block", &pd::OpDesc::Block, + pybind11::return_value_policy::reference); } } // namespace pybind diff --git a/paddle/fluid/pybind/protobuf.h b/paddle/fluid/pybind/protobuf.h index d0dc8936b3df50ca12315f113fbb36b0f98bb53f..e7370672a88fcf9238cc88c6aae65c6ee643746b 100644 --- a/paddle/fluid/pybind/protobuf.h +++ b/paddle/fluid/pybind/protobuf.h @@ -11,25 +11,25 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include + #include #include + #include "paddle/fluid/platform/variant.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" -namespace py = pybind11; - namespace paddle { namespace pybind { -void BindProgramDesc(py::module& m); -void BindBlockDesc(py::module& m); -void BindVarDsec(py::module& m); -void BindOpDesc(py::module& m); +void BindProgramDesc(pybind11::module* m); +void BindBlockDesc(pybind11::module* m); +void BindVarDsec(pybind11::module* m); +void BindOpDesc(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 6c05442466f5f3d8e04a8f0a2206443b1007a107..a1e8ff6399f0812773a7bb753c90e4400b1763d9 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -11,12 +11,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - -#include "paddle/fluid/pybind/protobuf.h" - -#include // for call_once +#include +#include +#include +#include // NOLINT // for call_once +#include #include -#include "paddle/fluid/framework/backward.h" +#include +#include + #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" @@ -25,18 +28,18 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/parallel_executor.h" #include "paddle/fluid/framework/prune.h" #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/operators/cond_op.h" -#include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/pybind/exception.h" -#include "paddle/fluid/pybind/pybind.h" +#include "paddle/fluid/pybind/protobuf.h" +#include "paddle/fluid/pybind/pybind.h" // NOLINT #include "paddle/fluid/pybind/recordio.h" #include "paddle/fluid/pybind/tensor_py.h" @@ -68,7 +71,7 @@ PYBIND11_PLUGIN(core) { // not cause namespace pollution. using namespace paddle::framework; // NOLINT - BindException(m); + BindException(&m); py::class_(m, "Tensor", py::buffer_protocol()) .def_buffer( @@ -99,6 +102,14 @@ PYBIND11_PLUGIN(core) { [](Tensor &self, paddle::platform::CUDAPlace &place) { self.mutable_data(place); }) + .def("alloc_int", + [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { + self.mutable_data(place); + }) + .def("alloc_float", + [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { + self.mutable_data(place); + }) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) @@ -112,6 +123,12 @@ PYBIND11_PLUGIN(core) { .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("set_float_element", TensorSetElement) @@ -219,11 +236,6 @@ All parameter, weight, gradient are variables in Paddle. }, py::return_value_policy::reference) #endif - .def("get_net", - [](Variable &self) -> operators::NetOp * { - return self.GetMutable(); - }, - py::return_value_policy::reference) .def("get_reader", [](Variable &self) -> framework::ReaderHolder * { PADDLE_ENFORCE(self.IsType()); @@ -232,7 +244,6 @@ All parameter, weight, gradient are variables in Paddle. py::return_value_policy::reference); py::class_(m, "Reader", "") - .def("has_next", &framework::ReaderHolder::HasNext) .def("reset", &framework::ReaderHolder::ReInit); py::class_(m, "Scope", "") @@ -316,7 +327,17 @@ All parameter, weight, gradient are variables in Paddle. #else return new paddle::platform::CUDADeviceContext(place); #endif - }); + }) + .def_static("create", + [](paddle::platform::CUDAPinnedPlace& place) + -> paddle::platform::DeviceContext* { +#ifndef PADDLE_WITH_CUDA + PADDLE_THROW( + "CUDAPinnedPlace is not supported in CPU device."); +#else + return new paddle::platform::CUDAPinnedDeviceContext(place); +#endif + });; // clang-format on #ifdef PADDLE_WITH_CUDA py::class_(m, "Communicator").def(py::init<>()); @@ -329,6 +350,10 @@ All parameter, weight, gradient are variables in Paddle. 
.def(py::init<>()) .def("__str__", string::to_string); + py::class_(m, "CUDAPinnedPlace") + .def(py::init<>()) + .def("__str__", string::to_string); + py::class_(m, "Place") .def(py::init<>()) .def("set_place", @@ -338,7 +363,11 @@ All parameter, weight, gradient are variables in Paddle. .def("set_place", [](platform::Place &self, const platform::CUDAPlace &gpu_place) { self = gpu_place; - }); + }) + .def("set_place", [](platform::Place &self, + const platform::CUDAPinnedPlace &cuda_pinned_place) { + self = cuda_pinned_place; + }); py::class_(m, "Operator") .def_static("create", @@ -351,17 +380,17 @@ All parameter, weight, gradient are variables in Paddle. desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) - .def("backward", - [](const OperatorBase &forwardOp, - const std::unordered_set &no_grad_vars) { - return Backward(forwardOp, no_grad_vars).release(); - }) .def("run", [](OperatorBase &self, const Scope &scope, const platform::CPUPlace &place) { self.Run(scope, place); }) .def("run", [](OperatorBase &self, const Scope &scope, const platform::CUDAPlace &place) { self.Run(scope, place); }) + .def("run", + [](OperatorBase &self, const Scope &scope, + const platform::CUDAPinnedPlace &place) { + self.Run(scope, place); + }) .def("type", [](const OperatorBase &op) -> std::string { return op.Type(); }) .def("outputs", @@ -378,42 +407,6 @@ All parameter, weight, gradient are variables in Paddle. [](const OperatorBase &op) { return op.OutputVars(false); }) .def("support_gpu", &OperatorBase::SupportGPU); - py::class_(m, "Net") - .def_static("create", - []() -> operators::NetOp * { - auto *retv = new operators::NetOp; - retv->SetType("plain_net"); - return retv; - }) - .def("append_op", [](operators::NetOp &self, - const OperatorBase &op) { self.AppendOp(op); }) - .def("complete_add_op", &operators::NetOp::CompleteAddOp) - .def("complete_add_op", [](std::shared_ptr &self) { - self->CompleteAddOp(); - }); - - // cond_op - py::class_(m, "CondOp") - .def_static("create", - [](py::bytes protobin) -> operators::CondOp * { - proto::OpDesc desc; - PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), - "Cannot parse user input to OpDesc"); - PADDLE_ENFORCE(desc.IsInitialized(), - "User OpDesc is not initialized, reason %s", - desc.InitializationErrorString()); - auto cond_op = OpRegistry::CreateOp(desc); - return static_cast(cond_op.release()); - }) - .def("set_truenet", - [](operators::CondOp &self, const operators::NetOp &net) -> void { - self.set_truenet(net.Clone()); - }) - .def("set_falsenet", - [](operators::CondOp &self, const operators::NetOp &net) -> void { - self.set_falsenet(net.Clone()); - }); - py::class_(m, "Executor") .def(py::init()) .def("run", @@ -422,7 +415,8 @@ All parameter, weight, gradient are variables in Paddle. m.def("init_gflags", framework::InitGflags); m.def("init_glog", framework::InitGLOG); - m.def("init_devices", &framework::InitDevices); + m.def("init_devices", + [](bool init_p2p) { framework::InitDevices(init_p2p); }); m.def("is_compiled_with_cuda", IsCompiledWithCUDA); #ifdef PADDLE_WITH_CUDA @@ -435,11 +429,11 @@ All parameter, weight, gradient are variables in Paddle. 
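The additions above expose CUDAPinnedPlace through the Python bindings: pinned tensors can be allocated and filled from numpy arrays, a pinned device context can be created, and CUDAPinnedPlace becomes a valid set_place and run target. The sketch below, not part of the diff, shows what pinned (page-locked) host memory means at the plain CUDA runtime level, since that is the resource CUDAPinnedPlace stands for; error checking is omitted for brevity.

// Illustrative sketch, not part of the diff: page-locked host memory allocated
// through the CUDA runtime API. Asynchronous host-to-device copies only
// overlap with other work when the host buffer is pinned; a pageable buffer
// effectively degrades to a blocking copy.
#include <cuda_runtime.h>

int main() {
  const size_t n = 1 << 20;
  float* pinned = nullptr;
  cudaMallocHost(reinterpret_cast<void**>(&pinned), n * sizeof(float));
  for (size_t i = 0; i < n; ++i) pinned[i] = 1.0f;

  float* device = nullptr;
  cudaMalloc(reinterpret_cast<void**>(&device), n * sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  // Enqueue the copy on a stream; it can proceed asynchronously because the
  // source buffer is page-locked.
  cudaMemcpyAsync(device, pinned, n * sizeof(float), cudaMemcpyHostToDevice,
                  stream);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(device);
  cudaFreeHost(pinned);
  return 0;
}
// End of illustrative sketch.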
m.def("set_feed_variable", framework::SetFeedVariable); m.def("get_fetch_variable", framework::GetFetchVariable); - BindProgramDesc(m); - BindBlockDesc(m); - BindVarDsec(m); - BindOpDesc(m); - BindConstValue(m); + BindProgramDesc(&m); + BindBlockDesc(&m); + BindVarDsec(&m); + BindOpDesc(&m); + BindConstValue(&m); py::class_(m, "LodRankTable") .def("items", [](framework::LoDRankTable &table) { @@ -496,7 +490,29 @@ All parameter, weight, gradient are variables in Paddle. m.def("disable_profiler", platform::DisableProfiler); m.def("reset_profiler", platform::ResetProfiler); - BindRecordIOWriter(m); + py::class_(m, "ParallelExecutor") + .def("__init__", + [](ParallelExecutor &self, size_t num_threads, bool use_event, + const std::vector &places, + const std::unordered_set ¶ms, + const std::unordered_set &bcast_vars, + const ProgramDesc &main_program, const std::string &loss_var_name, + Scope *scope, std::vector &local_scopes, + bool allow_op_delay) { + new (&self) + ParallelExecutor(num_threads, use_event, places, params, + bcast_vars, main_program, loss_var_name, + scope, local_scopes, allow_op_delay); + }) + .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs) + .def("local_scopes", + [](ParallelExecutor &self) -> std::vector * { + return &self.GetLocalScopes(); + }, + py::return_value_policy::reference) + .def("run", &ParallelExecutor::Run); + + BindRecordIOWriter(&m); return m.ptr(); } } // namespace pybind diff --git a/paddle/fluid/pybind/recordio.cc b/paddle/fluid/pybind/recordio.cc index 16f8bfb1a2e3a840670594d3cc2970e690dce891..330d104e0a774d905e463566f85bd2e64a080190 100644 --- a/paddle/fluid/pybind/recordio.cc +++ b/paddle/fluid/pybind/recordio.cc @@ -13,13 +13,19 @@ // limitations under the License. #include "paddle/fluid/pybind/recordio.h" + #include +#include +#include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/recordio/writer.h" namespace paddle { namespace pybind { +namespace { + class RecordIOWriter { public: RecordIOWriter(const std::string& filename, recordio::Compressor compressor, @@ -33,7 +39,7 @@ class RecordIOWriter { void CompleteAppendTensor() { auto& ctx = *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); - framework::WriteToRecordIO(writer_, tensors_, ctx); + framework::WriteToRecordIO(&writer_, tensors_, ctx); tensors_.clear(); } @@ -49,8 +55,10 @@ class RecordIOWriter { recordio::Writer writer_; }; -void BindRecordIOWriter(py::module& m) { - py::class_ writer(m, "RecordIOWriter", ""); +} // namespace + +void BindRecordIOWriter(py::module* m) { + py::class_ writer(*m, "RecordIOWriter", ""); py::enum_(writer, "Compressor", "") .value("Snappy", recordio::Compressor::kSnappy) .value("NoCompress", recordio::Compressor::kNoCompress); diff --git a/paddle/fluid/pybind/recordio.h b/paddle/fluid/pybind/recordio.h index 60e6a9e8595614b38375fca8c13d520739af9aaf..2555f9b719af8f73fbac10d92b890afd99fac290 100644 --- a/paddle/fluid/pybind/recordio.h +++ b/paddle/fluid/pybind/recordio.h @@ -21,6 +21,7 @@ namespace py = pybind11; namespace paddle { namespace pybind { -extern void BindRecordIOWriter(py::module& m); +void BindRecordIOWriter(py::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 6f8c597f8e610594851c318c122563523e4e7ea6..4a9dbd324c90380e784cc9457845fabd858585be 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -13,7 +13,10 @@ See the License for the specific language governing permissions and 
limitations under the License. */ #pragma once +#include #include +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device_context.h" @@ -21,12 +24,8 @@ limitations under the License. */ #include "pybind11/numpy.h" #include "pybind11/pybind11.h" -namespace py = pybind11; - namespace paddle { - namespace pybind { - namespace details { template @@ -34,16 +33,16 @@ struct CastToPyBufferImpl; template struct CastToPyBufferImpl { - py::buffer_info operator()(framework::Tensor &tensor) { + pybind11::buffer_info operator()(const framework::Tensor &tensor) { PADDLE_THROW("This type of tensor cannot be expose to Python"); - return py::buffer_info(); + return pybind11::buffer_info(); } }; template struct CastToPyBufferImpl { using CUR_TYPE = typename std::tuple_element>::type; - py::buffer_info operator()(framework::Tensor &tensor) { + pybind11::buffer_info operator()(const framework::Tensor &tensor) { if (std::type_index(typeid(CUR_TYPE)) == tensor.type()) { auto dim_vec = framework::vectorize(tensor.dims()); std::vector dims_outside; @@ -82,15 +81,15 @@ struct CastToPyBufferImpl { if (std::type_index(typeid(CUR_TYPE)) == std::type_index(typeid(platform::float16))) { - return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), - "e", /* np.dtype('e') == np.float16 */ - (size_t)framework::arity(dst_tensor.dims()), - dims_outside, strides); + return pybind11::buffer_info( + dst_tensor.data(), sizeof(CUR_TYPE), + "e", /* np.dtype('e') == np.float16 */ + (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } else { - return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), - py::format_descriptor::format(), - (size_t)framework::arity(dst_tensor.dims()), - dims_outside, strides); + return pybind11::buffer_info( + dst_tensor.data(), sizeof(CUR_TYPE), + pybind11::format_descriptor::format(), + (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } } else { constexpr bool less = I + 1 < std::tuple_size>::value; @@ -101,7 +100,7 @@ struct CastToPyBufferImpl { } // namespace details -inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { +inline pybind11::buffer_info CastToPyBuffer(const framework::Tensor &tensor) { auto buffer_info = details::CastToPyBufferImpl()(tensor); @@ -109,7 +108,7 @@ inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { } template -T TensorGetElement(framework::Tensor &self, size_t offset) { +T TensorGetElement(const framework::Tensor &self, size_t offset) { if (platform::is_cpu_place(self.place())) { return self.data()[offset]; } else { @@ -121,64 +120,70 @@ T TensorGetElement(framework::Tensor &self, size_t offset) { // TODO(dzhwinter) : fix the redundent Tensor allocate and free template -void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { - if (platform::is_gpu_place(self.place())) { +void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { + if (platform::is_gpu_place(self->place())) { std::shared_ptr dst(new framework::Tensor); - framework::TensorCopy(self, platform::CPUPlace(), dst.get()); + framework::TensorCopy(*self, platform::CPUPlace(), dst.get()); dst->data()[offset] = elem; - framework::TensorCopy(*dst.get(), self.place(), &self); + framework::TensorCopy(*dst.get(), self->place(), self); - } else if (platform::is_cpu_place(self.place())) { - self.data()[offset] = elem; + } else if (platform::is_cpu_place(self->place())) { + self->data()[offset] = elem; } } template void 
PyCPUTensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CPUPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CPUPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); std::memcpy(dst, array.data(), sizeof(T) * array.size()); } template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. void PyCPUTensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CPUPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CPUPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); } #ifdef PADDLE_WITH_CUDA template void PyCUDATensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CUDAPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CUDAPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto dev_ctx = @@ -188,18 +193,22 @@ void PyCUDATensorSetFromArray( } template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. 
void PyCUDATensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CUDAPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CUDAPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto dev_ctx = @@ -208,6 +217,43 @@ void PyCUDATensorSetFromArray( sizeof(uint16_t) * array.size(), cudaMemcpyHostToDevice, dev_ctx->stream()); } + +template +void PyCUDAPinnedTensorSetFromArray( + framework::Tensor *self, + pybind11::array_t + array, + const paddle::platform::CUDAPinnedPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back(static_cast(array.shape()[i])); + } + + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); + std::memcpy(dst, array.data(), sizeof(T) * array.size()); +} + +template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. +void PyCUDAPinnedTensorSetFromArray( + framework::Tensor *self, + pybind11::array_t + array, + const paddle::platform::CUDAPinnedPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back(static_cast(array.shape()[i])); + } + + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); + std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); +} #endif } // namespace pybind diff --git a/paddle/fluid/pybind/tensor_py_test.cc b/paddle/fluid/pybind/tensor_py_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..1a0ae1d65833b1097bf69befe05884cab1317a89 --- /dev/null +++ b/paddle/fluid/pybind/tensor_py_test.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pybind/tensor_py.h" + +#include + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/tensor.h" + +TEST(TensorPy, CastToPyBufferImpl) { + typedef int ElemType; + + paddle::framework::Tensor t; + auto d = paddle::framework::make_ddim({1, 2, 3}); + int* p = t.mutable_data(d, paddle::platform::CPUPlace()); + for (int i = 0; i < paddle::framework::product(d); ++i) { + p[i] = i; + } + + pybind11::buffer_info bi = paddle::pybind::CastToPyBuffer(t); + EXPECT_EQ(bi.itemsize, static_cast(sizeof(ElemType))); + EXPECT_EQ(bi.size, static_cast(paddle::framework::product(d))); + EXPECT_EQ(bi.ndim, static_cast(3)); // 3-dimensional as d. + EXPECT_EQ(bi.shape.size(), 3U); // as Dim d. 
+ EXPECT_EQ(bi.shape[0], static_cast(1)); + EXPECT_EQ(bi.shape[1], static_cast(2)); + EXPECT_EQ(bi.shape[2], static_cast(3)); + EXPECT_EQ(bi.strides.size(), 3U); // 3-dimensional as d. + EXPECT_EQ(bi.strides[2], static_cast(sizeof(ElemType))); + EXPECT_EQ(bi.strides[1], static_cast(sizeof(ElemType) * 3)); + EXPECT_EQ(bi.strides[0], static_cast(sizeof(ElemType) * 2 * 3)); +} diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index 187a6a4ea7bd9d3a8ae48fa262e18f71b0f7d20d..82d9aa601cf450b8f90573d6c582bb12ced7a48a 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -14,11 +14,13 @@ #include "paddle/fluid/recordio/chunk.h" +#include +#include #include #include + #include "paddle/fluid/platform/enforce.h" #include "snappystream.hpp" -#include "zlib.h" namespace paddle { namespace recordio { @@ -58,8 +60,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) { * Copy stream in to another stream */ static void PipeStream(std::istream& in, std::ostream& os) { - ReadStreamByBuf( - in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); }); + ReadStreamByBuf(in, 0, + [&os](const char* buf, size_t len) { os.write(buf, len); }); } /** @@ -68,8 +70,8 @@ static void PipeStream(std::istream& in, std::ostream& os) { static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) { uint32_t crc = static_cast(crc32(0, nullptr, 0)); ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) { - crc = static_cast(crc32( - crc, reinterpret_cast(buf), static_cast(len))); + crc = static_cast(crc32(crc, reinterpret_cast(buf), + static_cast(len))); }); return crc; } diff --git a/paddle/fluid/recordio/chunk.h b/paddle/fluid/recordio/chunk.h index bf20ebd455c26ddeebeeea8db04cf7103b0c085f..71a1556a33bfa5c937d6a799d2818cd5a5ef2094 100644 --- a/paddle/fluid/recordio/chunk.h +++ b/paddle/fluid/recordio/chunk.h @@ -24,7 +24,7 @@ namespace recordio { // A Chunk contains the Header and optionally compressed records. class Chunk { -public: + public: Chunk() : num_bytes_(0) {} void Add(const std::string& buf) { num_bytes_ += buf.size(); @@ -46,7 +46,7 @@ public: bool Empty() const { return records_.empty(); } -private: + private: std::vector records_; // sum of record lengths in bytes. 
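The new tensor_py_test.cc above checks the buffer_info that CastToPyBuffer produces for a 1x2x3 int tensor: row-major strides of sizeof(int), 3*sizeof(int), and 2*3*sizeof(int). The sketch below, not part of the diff, shows how such C-order strides fall out of a shape; RowMajorStrides is a name invented for this illustration.

// Illustrative sketch, not part of the diff: deriving row-major (C-order)
// strides from a shape, matching the expectations in tensor_py_test.cc for a
// {1, 2, 3} tensor of int.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<size_t> RowMajorStrides(const std::vector<size_t>& dims,
                                    size_t elem_size) {
  std::vector<size_t> strides(dims.size());
  size_t step = elem_size;  // the innermost axis moves by one element
  for (size_t i = dims.size(); i-- > 0;) {
    strides[i] = step;
    step *= dims[i];
  }
  return strides;
}

int main() {
  auto s = RowMajorStrides({1, 2, 3}, sizeof(int));
  assert(s[2] == sizeof(int));          // contiguous innermost axis
  assert(s[1] == sizeof(int) * 3);      // skip one row of 3 ints
  assert(s[0] == sizeof(int) * 2 * 3);  // skip one 2x3 plane
  return 0;
}
// End of illustrative sketch.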
size_t num_bytes_; diff --git a/paddle/fluid/recordio/chunk_test.cc b/paddle/fluid/recordio/chunk_test.cc index 1f0e36a14d373ca96167199d4582bc8f17290ae8..5177475c016097d9a118aa79f855672354b3ef53 100644 --- a/paddle/fluid/recordio/chunk_test.cc +++ b/paddle/fluid/recordio/chunk_test.cc @@ -18,32 +18,30 @@ #include "gtest/gtest.h" -using namespace paddle::recordio; - TEST(Chunk, SaveLoad) { - Chunk ch; + paddle::recordio::Chunk ch; ch.Add(std::string("12345", 6)); ch.Add(std::string("123", 4)); std::stringstream ss; - ch.Write(ss, Compressor::kNoCompress); + ch.Write(ss, paddle::recordio::Compressor::kNoCompress); ss.seekg(0); ch.Parse(ss); ASSERT_EQ(ch.NumBytes(), 10U); } TEST(Chunk, Compressor) { - Chunk ch; + paddle::recordio::Chunk ch; ch.Add(std::string("12345", 6)); ch.Add(std::string("123", 4)); ch.Add(std::string("123", 4)); ch.Add(std::string("123", 4)); std::stringstream ss; - ch.Write(ss, Compressor::kSnappy); + ch.Write(ss, paddle::recordio::Compressor::kSnappy); std::stringstream ss2; - ch.Write(ss2, Compressor::kNoCompress); + ch.Write(ss2, paddle::recordio::Compressor::kNoCompress); ASSERT_LE(ss.tellp(), ss2.tellp()); // Compress should contain less data; ch.Clear(); ch.Parse(ss); - ASSERT_EQ(ch.NumBytes(), 18); + ASSERT_EQ(ch.NumBytes(), 18ul); } diff --git a/paddle/fluid/recordio/header.cc b/paddle/fluid/recordio/header.cc index ed09d58f6a3e2dba50bf4407c0463480575b248e..c4822329a43a79adc81f0b0cf145b22661ac6f50 100644 --- a/paddle/fluid/recordio/header.cc +++ b/paddle/fluid/recordio/header.cc @@ -13,6 +13,9 @@ // limitations under the License. #include "paddle/fluid/recordio/header.h" + +#include + #include "paddle/fluid/platform/enforce.h" namespace paddle { diff --git a/paddle/fluid/recordio/header.h b/paddle/fluid/recordio/header.h index 9200ac090de4514bef3704ac502039222eef2284..245425990b93a90d7ac6b233cff54feb48308d48 100644 --- a/paddle/fluid/recordio/header.h +++ b/paddle/fluid/recordio/header.h @@ -37,7 +37,7 @@ enum class Compressor : uint32_t { // Header is the metadata of Chunk class Header { -public: + public: Header(); Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs); @@ -51,7 +51,7 @@ public: Compressor CompressType() const { return compressor_; } uint32_t CompressSize() const { return compress_size_; } -private: + private: uint32_t num_records_; uint32_t checksum_; Compressor compressor_; diff --git a/paddle/fluid/recordio/header_test.cc b/paddle/fluid/recordio/header_test.cc index a7d627c3eb4a7af1954795f77e5f24739edadae8..00f1887dc5e1188829ef4cd42754d161f041656d 100644 --- a/paddle/fluid/recordio/header_test.cc +++ b/paddle/fluid/recordio/header_test.cc @@ -18,14 +18,12 @@ #include "gtest/gtest.h" -using namespace paddle::recordio; - TEST(Recordio, ChunkHead) { - Header hdr(0, 1, Compressor::kGzip, 3); + paddle::recordio::Header hdr(0, 1, paddle::recordio::Compressor::kGzip, 3); std::stringstream ss; hdr.Write(ss); ss.seekg(0, std::ios::beg); - Header hdr2; + paddle::recordio::Header hdr2; hdr2.Parse(ss); EXPECT_TRUE(hdr == hdr2); } diff --git a/paddle/fluid/recordio/scanner.cc b/paddle/fluid/recordio/scanner.cc index c22281dc97e05173ad76ce76959833b92f11c4ee..88b4d4001bc1b6dc935a9aabc2db5edfb55a60e4 100644 --- a/paddle/fluid/recordio/scanner.cc +++ b/paddle/fluid/recordio/scanner.cc @@ -13,10 +13,14 @@ // limitations under the License. 
#include "paddle/fluid/recordio/scanner.h" + +#include + #include "paddle/fluid/platform/enforce.h" namespace paddle { namespace recordio { + Scanner::Scanner(std::unique_ptr &&stream) : stream_(std::move(stream)) { Reset(); diff --git a/paddle/fluid/recordio/scanner.h b/paddle/fluid/recordio/scanner.h index f3f17b69f195ddd92f5a39ead9755a7b8e2dd329..34f1b0c78d6b5af6072a993579e1866d38c6d009 100644 --- a/paddle/fluid/recordio/scanner.h +++ b/paddle/fluid/recordio/scanner.h @@ -16,12 +16,15 @@ #include #include +#include + #include "paddle/fluid/recordio/chunk.h" + namespace paddle { namespace recordio { class Scanner { -public: + public: explicit Scanner(std::unique_ptr&& stream); explicit Scanner(const std::string& filename); @@ -32,7 +35,7 @@ public: bool HasNext() const; -private: + private: std::unique_ptr stream_; Chunk cur_chunk_; size_t offset_; diff --git a/paddle/fluid/recordio/writer.cc b/paddle/fluid/recordio/writer.cc index 196d66edff8cc6000afcd74fb945c05dcab7106a..8046f4ff7896c897ebe1de2e2bb231cad5a0e410 100644 --- a/paddle/fluid/recordio/writer.cc +++ b/paddle/fluid/recordio/writer.cc @@ -12,9 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/recordio/writer.h" + +#include + #include "paddle/fluid/platform/enforce.h" + namespace paddle { namespace recordio { + void Writer::Write(const std::string& record) { cur_chunk_.Add(record); if (cur_chunk_.NumRecords() >= max_num_records_in_chunk_) { diff --git a/paddle/fluid/recordio/writer.h b/paddle/fluid/recordio/writer.h index 0c478d507547b10b8ebaaf5e512557a5c8c13e65..ac7e50ee90e6e8671d68e0d8065e0cf06c819ad0 100644 --- a/paddle/fluid/recordio/writer.h +++ b/paddle/fluid/recordio/writer.h @@ -11,16 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - #pragma once + +#include + #include "paddle/fluid/recordio/chunk.h" namespace paddle { namespace recordio { class Writer { -public: - Writer(std::ostream* sout, - Compressor compressor, + public: + Writer(std::ostream* sout, Compressor compressor, size_t max_num_records_in_chunk = 1000) : stream_(*sout), max_num_records_in_chunk_(max_num_records_in_chunk), @@ -32,7 +33,7 @@ public: ~Writer(); -private: + private: std::ostream& stream_; size_t max_num_records_in_chunk_; Chunk cur_chunk_; diff --git a/paddle/fluid/recordio/writer_scanner_test.cc b/paddle/fluid/recordio/writer_scanner_test.cc index 7e764f0d9439709ad101af2b8864dc0158bd359b..6583df21a20e9e034adc14b1d3eeb136899d659e 100644 --- a/paddle/fluid/recordio/writer_scanner_test.cc +++ b/paddle/fluid/recordio/writer_scanner_test.cc @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "gtest/gtest.h" - #include +#include + +#include "gtest/gtest.h" #include "paddle/fluid/recordio/scanner.h" #include "paddle/fluid/recordio/writer.h" @@ -66,4 +67,4 @@ TEST(WriterScanner, TinyChunk) { ASSERT_EQ(scanner.Next(), "DEFG"); ASSERT_FALSE(scanner.HasNext()); } -} \ No newline at end of file +} diff --git a/paddle/fluid/string/.clang-format b/paddle/fluid/string/.clang-format deleted file mode 120000 index 7d28cb3924707d39dafe20f4664fb17b5538996c..0000000000000000000000000000000000000000 --- a/paddle/fluid/string/.clang-format +++ /dev/null @@ -1 +0,0 @@ -../framework/.clang-format \ No newline at end of file diff --git a/paddle/fluid/string/piece.cc b/paddle/fluid/string/piece.cc index 454f5d8d38c5f02598cddaab555334a1e8a398da..8e8cfb0e91389490895835ed09ef36adf756d3ca 100644 --- a/paddle/fluid/string/piece.cc +++ b/paddle/fluid/string/piece.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "piece.h" +#include "paddle/fluid/string/piece.h" #include diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 693cf9d6dfeea0735801e64fe74b9770c258c553..062095a1c3e977c0bcc89346ead765acb023bcf7 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -71,6 +71,8 @@ #include #include +#include + #include "tinyformat/tinyformat.h" // https://github.com/c42f/tinyformat namespace paddle { diff --git a/paddle/fluid/string/printf_test.cc b/paddle/fluid/string/printf_test.cc index b6a60c8d6b7f15f8e5572cf5bb1e7f04ee1c1598..678029f93534ab374bd29083f8991d632ccdd5a1 100644 --- a/paddle/fluid/string/printf_test.cc +++ b/paddle/fluid/string/printf_test.cc @@ -11,7 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "printf.h" + +#include "paddle/fluid/string/printf.h" #include @@ -21,7 +22,7 @@ TEST(StringPrintf, StringPrintf) { std::string weekday = "Wednesday"; const char* month = "July"; size_t day = 27; - long hour = 14; + int hour = 14; int min = 44; EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, diff --git a/paddle/fluid/string/to_string_test.cc b/paddle/fluid/string/to_string_test.cc index 8fc293af0e473994ac13f6615d3f6195c8c5f04c..1d9c0e5e0c2b6e7f44c1622d2828b21b0a4380ee 100644 --- a/paddle/fluid/string/to_string_test.cc +++ b/paddle/fluid/string/to_string_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "to_string.h" +#include "paddle/fluid/string/to_string.h" #include constexpr char kOutputString[] = "User Defined Output"; @@ -26,14 +26,13 @@ std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) { } TEST(to_string, normal) { - using namespace paddle::string; + using paddle::string::to_string; ASSERT_EQ("10", to_string(10)); ASSERT_EQ("abc", to_string("abc")); ASSERT_EQ("1.2", to_string(1.2)); } TEST(to_string, user_defined) { - using namespace paddle::string; UserDefinedClass instance; - ASSERT_EQ(kOutputString, to_string(instance)); + ASSERT_EQ(kOutputString, paddle::string::to_string(instance)); } diff --git a/paddle/gserver/layers/UpsampleLayer.cpp b/paddle/gserver/layers/UpsampleLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ff5332e6401acc3a28c9808fddd4812a7323544 --- /dev/null +++ b/paddle/gserver/layers/UpsampleLayer.cpp @@ -0,0 +1,108 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and + limitations under the License. */ + +#include "UpsampleLayer.h" +#include "iostream" + +namespace paddle { + +REGISTER_LAYER(upsample, UpsampleLayer); + +size_t UpsampleLayer::getOutputSize() { + if (upsampleSize_ == 0) { + upsampleSize_ = imgSize_ * scale_ - static_cast(padOutX_); + upsampleSizeY_ = imgSizeY_ * scaleY_ - static_cast(padOutY_); + } + return upsampleSize_ * upsampleSizeY_ * channels_; +} + +bool UpsampleLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + Layer::init(layerMap, parameterMap); + + CHECK_EQ(inputLayers_.size(), 2U); + CHECK_EQ(config_.inputs_size(), 2); + const auto& conf = config_.inputs(0).upsample_conf(); + const auto& img_conf = conf.image_conf(); + + imgSizeY_ = + img_conf.has_img_size_y() ? 
img_conf.img_size_y() : img_conf.img_size(); + imgSize_ = img_conf.img_size(); + channels_ = img_conf.channels(); + + CHECK((conf.has_upsample_size()) || (conf.has_scale())) + << "scale or upsample_size is required."; + + if (conf.has_upsample_size()) { + upsampleSize_ = conf.upsample_size(); + upsampleSizeY_ = upsampleSize_; + if (conf.has_upsample_size_y()) { + upsampleSizeY_ = conf.upsample_size_y(); + } + } else { + if (!conf.has_scale_y()) { + scale_ = scaleY_ = conf.scale_y(); + CHECK_GT(static_cast(scale_), 1); + } else { + scale_ = conf.scale(); + scaleY_ = conf.scale_y(); + } + padOutX_ = conf.pad_out_x(); + padOutY_ = conf.pad_out_y(); + CHECK(!padOutX_ || scale_ == 2) + << "Output height padding compensation requires scale_ == 2"; + CHECK(!padOutY_ || scaleY_ == 2) + << "Output width padding compensation requires scaleY_ == 2"; + upsampleSize_ = upsampleSizeY_ = 0; + } + return true; +} + +void UpsampleLayer::forward(PassType passType) { + Layer::forward(passType); + + MatrixPtr input = getInputValue(0); + MatrixPtr mask = inputLayers_[1]->getOutput("mask").value; + + size_t batchSize = input->getHeight(); + size_t outSize = getOutputSize(); + + CHECK_EQ(input->getWidth(), mask->getWidth()); + CHECK_EQ(mask->getHeight(), batchSize); + resetOutput(batchSize, outSize); + + MatrixPtr output = getOutputValue(); + output->upsampleForward(*input, + *mask, + imgSize_, + imgSizeY_, + channels_, + upsampleSize_, + upsampleSizeY_); +} + +void UpsampleLayer::backward(const UpdateCallback& callback) { + MatrixPtr mask = inputLayers_[1]->getOutput("mask").value; + MatrixPtr inputGrad = getInputGrad(0); + MatrixPtr outputGrad = getOutputGrad(); + inputGrad->upsampleBackward(*outputGrad, + *mask, + imgSize_, + imgSizeY_, + channels_, + upsampleSize_, + upsampleSizeY_); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/UpsampleLayer.h b/paddle/gserver/layers/UpsampleLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..25efbac5e9e6e92653f7c2b2f4dca9221737e5d6 --- /dev/null +++ b/paddle/gserver/layers/UpsampleLayer.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "Layer.h" +#include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +/** + * This layer transpose the pooling process. + * It takes two input, the first input is the input data, and + * the second is the mask data from the max-pool-with-mask layer. 
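As the class comment above says, the upsample layer is the transpose of pooling: it takes the pooled values together with the mask emitted by the max-pool-with-mask layer and scatters each value back to the output position the mask recorded. A minimal sketch of that scatter on flat buffers follows; it is not part of the diff, and UpsampleWithMask is an invented helper, not Paddle's Matrix::upsampleForward.

// Illustrative sketch, not part of the diff: the core scatter of an unpooling
// ("upsample with mask") step. Each pooled value is written to the flat output
// index stored in the mask; every other output element stays zero.
#include <cassert>
#include <vector>

void UpsampleWithMask(const std::vector<float>& pooled,
                      const std::vector<int>& mask,  // output positions
                      std::vector<float>* out) {
  for (size_t i = 0; i < pooled.size(); ++i) {
    (*out)[mask[i]] = pooled[i];
  }
}

int main() {
  // 2x2 pooled maxima and the flat positions (within a 4x4 image) where the
  // maxima were found during max pooling.
  std::vector<float> pooled = {9.f, 8.f, 7.f, 6.f};
  std::vector<int> mask = {5, 2, 8, 15};
  std::vector<float> out(16, 0.f);
  UpsampleWithMask(pooled, mask, &out);
  assert(out[5] == 9.f && out[15] == 6.f);
  return 0;
}
// End of illustrative sketch.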
+ * + */ + +class UpsampleLayer : public Layer { +public: + explicit UpsampleLayer(const LayerConfig& config) : Layer(config) {} + ~UpsampleLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forward(PassType passType) override; + void backward(const UpdateCallback& callback) override; + + size_t getOutputSize(); + +protected: + size_t scale_, scaleY_; + size_t upsampleSize_, upsampleSizeY_; + size_t padOutX_, padOutY_; + size_t imgSize_, imgSizeY_; + size_t channels_; +}; + +} // namespace paddle diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index b578a906c2027a1169a0098b93f8d0742920f99d..9d7cad7584d1defefe38bdd4d041b98bd9e45bf0 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -14,6 +14,11 @@ function(gserver_test TARGET) COMMAND ${TARGET}) endfunction() +add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/concat_dotmul_a.conf + COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/* ${CMAKE_CURRENT_BINARY_DIR} +) +add_custom_target(copy_gserver_conf ALL DEPENDS concat_dotmul_a.conf) + gserver_test(test_LayerGrad) gserver_test(test_CRFLayerGrad) gserver_test(test_CrossEntropyOverBeamGrad) @@ -27,15 +32,16 @@ gserver_test(test_BatchNorm) gserver_test(test_KmaxSeqScore) gserver_test(test_Expand) gserver_test(test_MaxPoolingWithMaskOutput) +gserver_test(test_Upsample) set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/gserver/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/gserver/tests) function(gserver_test_with_python TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/${TARGET} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endfunction() gserver_test_with_python(test_PyDataProvider2) @@ -56,7 +62,7 @@ if(WITH_MKLDNN) LayerGradUtil.cpp) add_test(NAME test_MKLDNN COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) endif() ############### test_WarpCTCLayer ####################### @@ -65,7 +71,7 @@ if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE) test_WarpCTCLayer.cpp) add_test(NAME test_WarpCTCLayer COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${WARPCTC_LIB_DIR} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) endif() if(NOT MOBILE_INFERENCE) @@ -83,15 +89,15 @@ if(NOT MOBILE_INFERENCE) endif() add_test(NAME test_NetworkCompare COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=${use_gpu} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) ############ test_CompareSparse ################ add_unittest_without_exec(test_CompareSparse test_CompareSparse.cpp) if(NOT ON_TRAVIS) add_test(NAME test_CompareSparse - COMMAND ${PYTHON_PATH} ./.set_port.sh -p port -n 6 + COMMAND ${PYTHON_PATH} ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port -n 6 ${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endif() endif() diff --git a/paddle/gserver/tests/test_Upsample.cpp b/paddle/gserver/tests/test_Upsample.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..39b902fcc75e71007f855e4e258e54ed8d40f16b
--- /dev/null
+++ b/paddle/gserver/tests/test_Upsample.cpp
@@ -0,0 +1,153 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+
+#include "LayerGradUtil.h"
+#include "paddle/math/MathUtils.h"
+#include "paddle/testing/TestUtil.h"
+
+void setPoolConfig(paddle::TestConfig* config,
+                   paddle::PoolConfig* pool,
+                   const string& poolType) {
+  (*config).biasSize = 0;
+  (*config).layerConfig.set_type("pool");
+  (*config).layerConfig.set_num_filters(1);
+
+  int kw = 2, kh = 2;
+  int pw = 0, ph = 0;
+  int sw = 2, sh = 2;
+  pool->set_pool_type(poolType);
+  pool->set_channels(2);
+  pool->set_size_x(kw);
+  pool->set_size_y(kh);
+  pool->set_start(0);
+  pool->set_padding(pw);
+  pool->set_padding_y(ph);
+  pool->set_stride(sw);
+  pool->set_stride_y(sh);
+
+  int ow =
+      paddle::outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false);
+  int oh =
+      paddle::outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false);
+  pool->set_output_x(ow);
+  pool->set_output_y(oh);
+}
+
+paddle::LayerPtr doOneUpsampleTest(const paddle::MatrixPtr& inputMat,
+                                   const string& poolType,
+                                   bool use_gpu,
+                                   real* tempGradData) {
+  /* prepare maxPoolWithMaskLayer */
+  paddle::TestConfig config;
+  config.inputDefs.push_back({paddle::INPUT_DATA, "layer_0", 128, 0});
+  paddle::LayerInputConfig* input = config.layerConfig.add_inputs();
+  paddle::PoolConfig* pool = input->mutable_pool_conf();
+
+  pool->set_img_size(8);
+  pool->set_img_size_y(8);
+  setPoolConfig(&config, pool, "max-pool-with-mask");
+  config.layerConfig.set_size(pool->output_x() * pool->output_y() *
+                              pool->channels());
+
+  config.layerConfig.set_name("MaxPoolWithMask");
+
+  std::vector<paddle::DataLayerPtr> dataLayers;
+  paddle::LayerMap layerMap;
+  vector<paddle::Argument> datas;
+
+  initDataLayer(config,
+                &dataLayers,
+                &datas,
+                &layerMap,
+                "MaxPoolWithMask",
+                1,
+                false,
+                use_gpu);
+
+  dataLayers[0]->getOutputValue()->copyFrom(*inputMat);
+
+  FLAGS_use_gpu = use_gpu;
+  std::vector<paddle::ParameterPtr> parameters;
+  paddle::LayerPtr maxPoolingWithMaskOutputLayer;
+  initTestLayer(config, &layerMap, &parameters, &maxPoolingWithMaskOutputLayer);
+  maxPoolingWithMaskOutputLayer->forward(paddle::PASS_GC);
+
+  /* prepare the upsample layer */
+  paddle::LayerConfig upsampleLayerConfig;
+  upsampleLayerConfig.set_type("upsample");
+  paddle::LayerInputConfig* input1 = upsampleLayerConfig.add_inputs();
+  upsampleLayerConfig.add_inputs();
+
+  paddle::UpsampleConfig* upsampleConfig = input1->mutable_upsample_conf();
+  upsampleConfig->set_scale(2);
+  paddle::ImageConfig* imageConfig = upsampleConfig->mutable_image_conf();
+  imageConfig->set_channels(2);
+  imageConfig->set_img_size(4);
+  imageConfig->set_img_size_y(4);
+  upsampleLayerConfig.set_size(2 * 8 * 8);
+  upsampleLayerConfig.set_name("upsample");
+
+  for (size_t i = 0; i < 2; i++) {
+    paddle::LayerInputConfig& inputTemp =
+        *(upsampleLayerConfig.mutable_inputs(i));
+    inputTemp.set_input_layer_name("MaxPoolWithMask");
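+    // Both inputs reference the same layer: UpsampleLayer::forward() reads
+    // the pooled values from input 0 and the "mask" output from input 1.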
+ } + + paddle::LayerPtr upsampleLayer; + paddle::ParameterMap parameterMap; + upsampleLayer = paddle::Layer::create(upsampleLayerConfig); + layerMap[upsampleLayerConfig.name()] = upsampleLayer; + upsampleLayer->init(layerMap, parameterMap); + upsampleLayer->setNeedGradient(true); + upsampleLayer->forward(paddle::PASS_GC); + upsampleLayer->getOutputGrad()->copyFrom(tempGradData, 128); + upsampleLayer->backward(); + + return upsampleLayer; +} + +TEST(Layer, maxPoolingWithMaskOutputLayerFwd) { + bool useGpu = false; + paddle::MatrixPtr inputMat; + paddle::MatrixPtr inputGPUMat; + paddle::MatrixPtr tempGradMat; + + inputMat = paddle::Matrix::create(1, 128, false, useGpu); + inputMat->randomizeUniform(); + + tempGradMat = paddle::Matrix::create(1, 128, false, useGpu); + tempGradMat->randomizeUniform(); + real* tempGradData = tempGradMat->getData(); + + paddle::LayerPtr upsampleLayerCPU = + doOneUpsampleTest(inputMat, "max-pool-with-mask", useGpu, tempGradData); + +#ifdef PADDLE_WITH_CUDA + useGpu = true; + real* data = inputMat->getData(); + inputGPUMat = paddle::Matrix::create(1, 128, false, useGpu); + inputGPUMat->copyFrom(data, 128); + paddle::LayerPtr upsampleLayerGPU = doOneUpsampleTest( + inputGPUMat, "max-pool-with-mask", useGpu, tempGradData); + paddle::checkMatrixEqual(upsampleLayerCPU->getOutput("").value, + upsampleLayerGPU->getOutput("").value); + + paddle::checkMatrixEqual(upsampleLayerCPU->getPrev(0)->getOutputGrad(), + upsampleLayerGPU->getPrev(0)->getOutputGrad()); +#endif +} diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 35359d4b5a8fb9715317257538a6e2e38fc16b60..0e84cb37392839d112448b0b3c12b042e7df838e 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1024,6 +1024,66 @@ void GpuMatrix::check(std::ostream& os, Matrix& refMat, bool printDiff) { LOG(INFO) << "the diffCnt is " << diffCnt; } +void GpuMatrix::upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + CHECK(input.useGpu_ == true) << "Matrix type are not equal"; + CHECK(mask.useGpu_ == true) << "Matrix type are not equal"; + + real* inputData = input.getData(); + real* maskData = mask.getData(); + real* outData = data_; + + size_t batch = input.getHeight(); + + CHECK(imgSizeH * imgSizeW * channels == input.getWidth()); + CHECK(imgSizeH * imgSizeW * channels == mask.getWidth()); + CHECK_EQ(batch, this->getHeight()); + CHECK(width_ == outputH * outputW * channels); + hl_upsample_forward(inputData, + maskData, + batch, + imgSizeH, + imgSizeW, + channels, + outputH, + outputW, + outData); +} + +void GpuMatrix::upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + CHECK(outputGrad.useGpu_ == true) << "Matrix type are not equal"; + CHECK(mask.useGpu_ == true) << "Matrix type are not equal"; + + real* outputGradData = outputGrad.getData(); + real* maskData = mask.getData(); + real* inputGradData = data_; + size_t batch = outputGrad.getHeight(); + + CHECK(imgSizeH * imgSizeW == this->getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outputH * outputW, outputGrad.getWidth()); + hl_upsample_backward(outputGradData, + maskData, + batch, + imgSizeH, + imgSizeW, + channels, + outputH, + outputW, + inputGradData); +} + void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, @@ -1986,6 +2046,72 @@ void CpuMatrix::inverse(MatrixPtr& matInv, bool memAlloc) { 
CHECK_EQ(info, 0); } +void CpuMatrix::upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + real* inputData = input.getData(); + real* maskData = mask.getData(); + real* outData = data_; + size_t inLength = imgSizeH * imgSizeW; + size_t outLength = outputH * outputW; + size_t batch = input.getHeight(); + CHECK(inLength == input.getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outLength, this->getWidth()); + + for (size_t k = 0; k < batch; k++) { + for (size_t c = 0; c < channels; c++) { + for (size_t i = 0; i < inLength; i++) { + size_t out_index = static_cast(maskData[i]); + if (out_index >= outLength) { + LOG(FATAL) << "upsample index " << out_index << " out of range."; + } + outData[out_index] = inputData[i]; + } + inputData += inLength; + maskData += inLength; + outData += outLength; + } + } +} + +void CpuMatrix::upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + real* outputGradData = outputGrad.getData(); + real* maskData = mask.getData(); + real* inputGradData = data_; + size_t inLength = imgSizeH * imgSizeW; + size_t outLength = outputH * outputW; + size_t batch = outputGrad.getHeight(); + CHECK(inLength == this->getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outLength, outputGrad.getWidth()); + + for (size_t k = 0; k < batch; k++) { + for (size_t c = 0; c < channels; c++) { + for (size_t i = 0; i < inLength; i++) { + size_t out_index = static_cast(maskData[i]); + if (out_index >= outLength) { + LOG(FATAL) << "upsample index " << out_index << " out of range."; + } + inputGradData[i] = outputGradData[out_index]; + } + inputGradData += inLength; + maskData += inLength; + outputGradData += outLength; + } + } +} + void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 631e69edc1b0f5c4ef4a115d4bd5ea29fc418018..04e9614eabc47c4c661ace2106e8ca96f45a1d49 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -859,6 +859,26 @@ public: LOG(FATAL) << "Not implemented"; } + virtual void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + LOG(FATAL) << "Not implemeted"; + } + + virtual void upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + LOG(FATAL) << "Not implemeted"; + } + /** * Pooling forward operation, pick out the largest element * in the sizeX of value, if the maskMatP is not NULL, it will @@ -1420,6 +1440,22 @@ public: void classificationError(Matrix& output, IVector& label, size_t topkSize = 1); + void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + + void upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, @@ -1694,6 +1730,22 @@ public: MatrixPtr clone(size_t height, size_t width, bool useGpu = false); + void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + + void upsampleBackward(Matrix& 
outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 322f72e4a58c7e8f2c26d994477cbb55551c595a..2b2a904974f3756576fb47851400e344c9357c57 100755 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -53,6 +53,7 @@ function cmake_gen() { -DWITH_FAST_BUNDLE_TEST=ON -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} ======================================== EOF # Disable UNITTEST_USE_VIRTUALENV in docker because @@ -78,6 +79,7 @@ EOF -DWITH_TESTING=${WITH_TESTING:-ON} \ -DWITH_FAST_BUNDLE_TEST=ON \ -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake \ + -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON } @@ -102,7 +104,9 @@ EOF # make install should also be test when unittest make install -j `nproc` pip install /usr/local/opt/paddle/share/wheels/*.whl - paddle version + if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]] ; then + paddle version + fi fi } @@ -123,9 +127,8 @@ EOF -DWITH_AVX=${WITH_AVX:-ON} \ -DWITH_SWIG_PY=ON \ -DWITH_STYLE_CHECK=OFF - make -j `nproc` gen_proto_py framework_py_proto - make -j `nproc` copy_paddle_pybind - make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs + + make -j `nproc` paddle_docs paddle_apis popd fi @@ -182,16 +185,24 @@ EOF NCCL_DEPS="" fi + if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]]; then + PADDLE_VERSION="paddle version" + CMD='"paddle", "version"' + else + PADDLE_VERSION="true" + CMD='"true"' + fi + cat >> /paddle/build/Dockerfile </dev/null | grep '^paddle' | sed 's/.*==//g'` +if [ "@WITH_GPU@" == "ON" ]; then + PADDLE_NAME="paddlepaddle-gpu" +else + PADDLE_NAME="paddlepaddle" +fi + +INSTALLED_VERSION=`pip freeze 2>/dev/null | grep "^${PADDLE_NAME}==" | sed 's/.*==//g'` -if [ -z ${INSTALLED_VERSION} ]; then +if [ -z "${INSTALLED_VERSION}" ]; then INSTALLED_VERSION="0.0.0" # not installed fi cat < ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so +add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so + COMMAND cmake -E copy $ ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so DEPENDS paddle_pybind) -add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so) +add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so) add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND touch stub.cc + COMMAND cp -r ${PADDLE_SOURCE_DIR}/python/paddle ${PADDLE_BINARY_DIR}/python + COMMAND cp -r ${PADDLE_SOURCE_DIR}/paddle/py_paddle ${PADDLE_BINARY_DIR}/python/ COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E remove_directory ${PADDLE_PYTHON_BUILD_DIR}/lib-python @@ -62,7 +64,7 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto profiler_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp ${MKL_DEPENDS}) -if(NOT WITH_FLUID) +if(NOT WITH_FLUID_ONLY) set(paddle_python_deps ${paddle_python_deps} paddle_pserver_main paddle_trainer paddle_merge_model) if(WITH_SWIG_PY) list(APPEND paddle_python_deps python_api_wheel) @@ -73,13 +75,15 @@ add_custom_target(paddle_python ALL 
DEPENDS ${paddle_python_deps}) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) if (WITH_TESTING) - if(NOT WITH_FLUID) + add_subdirectory(paddle/reader/tests) + add_subdirectory(paddle/dataset/tests) + if(NOT WITH_FLUID_ONLY) add_subdirectory(paddle/trainer_config_helpers/tests) if (WITH_SWIG_PY) # enable v2 API unittest only when paddle swig api is compiled add_subdirectory(paddle/v2/tests) - add_subdirectory(paddle/v2/reader/tests) add_subdirectory(paddle/v2/plot/tests) + add_subdirectory(paddle/v2/reader/tests) endif() endif() add_subdirectory(paddle/fluid/tests) diff --git a/python/paddle/.gitignore b/python/paddle/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..98527864664d32f798edc06a53131e8d5a068295 --- /dev/null +++ b/python/paddle/.gitignore @@ -0,0 +1 @@ +version.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 1030c94e16376c326cb8b32926b8c47625cd38f0..d1cf04161ae4444ebc7da7fbc20e37dafe6c0fb1 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -14,8 +14,14 @@ try: from version import full_version as __version__ from version import commit as __git_commit__ + except ImportError: import sys sys.stderr.write('''Warning with import paddle: you should not import paddle from the source directory; please install paddlepaddle*.whl firstly.''' ) + +import reader +import dataset +import batch +batch = batch.batch diff --git a/python/paddle/batch.py b/python/paddle/batch.py new file mode 100644 index 0000000000000000000000000000000000000000..317cf037c69f8639e3760fbfce20565127794fcb --- /dev/null +++ b/python/paddle/batch.py @@ -0,0 +1,41 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['batch'] + + +def batch(reader, batch_size): + """ + Create a batched reader. + + :param reader: the data reader to read from. + :type reader: callable + :param batch_size: size of each mini-batch + :type batch_size: int + :return: the batched reader. + :rtype: callable + """ + + def batch_reader(): + r = reader() + b = [] + for instance in r: + b.append(instance) + if len(b) == batch_size: + yield b + b = [] + if b: + yield b + + return batch_reader diff --git a/python/paddle/dataset/__init__.py b/python/paddle/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3315e826e82a33dfeb9c5223ce196cffb1ae7234 --- /dev/null +++ b/python/paddle/dataset/__init__.py @@ -0,0 +1,48 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Dataset package. +""" + +import mnist +import imikolov +import imdb +import cifar +import movielens +import conll05 +import uci_housing +import sentiment +import wmt14 +import wmt16 +import mq2007 +import flowers +import voc2012 +import image + +__all__ = [ + 'mnist', + 'imikolov', + 'imdb', + 'cifar', + 'movielens', + 'conll05', + 'sentiment', + 'uci_housing', + 'wmt14', + 'wmt16', + 'mq2007', + 'flowers', + 'voc2012', + 'image', +] diff --git a/python/paddle/dataset/cifar.py b/python/paddle/dataset/cifar.py new file mode 100644 index 0000000000000000000000000000000000000000..07f4dcbdab2fecf84a0a7042a48a8c8a9e5f880d --- /dev/null +++ b/python/paddle/dataset/cifar.py @@ -0,0 +1,139 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +CIFAR dataset. + +This module will download dataset from +https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into +paddle reader creators. + +The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, +with 6000 images per class. There are 50000 training images and 10000 test +images. + +The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes +containing 600 images each. There are 500 training images and 100 testing +images per class. + +""" + +import cPickle +import itertools +import numpy +import paddle.dataset.common +import tarfile + +__all__ = ['train100', 'test100', 'train10', 'test10', 'convert'] + +URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/' +CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' +CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a' +CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz' +CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85' + + +def reader_creator(filename, sub_name): + def read_batch(batch): + data = batch['data'] + labels = batch.get('labels', batch.get('fine_labels', None)) + assert labels is not None + for sample, label in itertools.izip(data, labels): + yield (sample / 255.0).astype(numpy.float32), int(label) + + def reader(): + with tarfile.open(filename, mode='r') as f: + names = (each_item.name for each_item in f + if sub_name in each_item.name) + + for name in names: + batch = cPickle.load(f.extractfile(name)) + for item in read_batch(batch): + yield item + + return reader + + +def train100(): + """ + CIFAR-100 training set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 99]. + + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5), + 'train') + + +def test100(): + """ + CIFAR-100 test set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Test reader creator. 
+ :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5), + 'test') + + +def train10(): + """ + CIFAR-10 training set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'data_batch') + + +def test10(): + """ + CIFAR-10 test set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Test reader creator. + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'test_batch') + + +def fetch(): + paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5) + paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, train100(), 1000, "cifar_train100") + paddle.dataset.common.convert(path, test100(), 1000, "cifar_test100") + paddle.dataset.common.convert(path, train10(), 1000, "cifar_train10") + paddle.dataset.common.convert(path, test10(), 1000, "cifar_test10") diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py new file mode 100644 index 0000000000000000000000000000000000000000..68660601c161d2332b17b448fae089506238ba78 --- /dev/null +++ b/python/paddle/dataset/common.py @@ -0,0 +1,236 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import requests +import hashlib +import os +import errno +import shutil +import sys +import importlib +import paddle.dataset +import cPickle +import glob +import cPickle as pickle + +__all__ = [ + 'DATA_HOME', + 'download', + 'md5file', + 'split', + 'cluster_files_reader', + 'convert', +] + +DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') + + +# When running unit tests, there could be multiple processes that +# trying to create DATA_HOME directory simultaneously, so we cannot +# use a if condition to check for the existence of the directory; +# instead, we use the filesystem as the synchronization mechanism by +# catching returned errors. 
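+# Only EEXIST (another process created the directory first) is swallowed
+# below; any other OSError is re-raised.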
+def must_mkdirs(path): + try: + os.makedirs(DATA_HOME) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + pass + + +must_mkdirs(DATA_HOME) + + +def md5file(fname): + hash_md5 = hashlib.md5() + f = open(fname, "rb") + for chunk in iter(lambda: f.read(4096), b""): + hash_md5.update(chunk) + f.close() + return hash_md5.hexdigest() + + +def download(url, module_name, md5sum, save_name=None): + dirname = os.path.join(DATA_HOME, module_name) + if not os.path.exists(dirname): + os.makedirs(dirname) + + filename = os.path.join(dirname, + url.split('/')[-1] + if save_name is None else save_name) + + retry = 0 + retry_limit = 3 + while not (os.path.exists(filename) and md5file(filename) == md5sum): + if os.path.exists(filename): + print "file md5", md5file(filename), md5sum + if retry < retry_limit: + retry += 1 + else: + raise RuntimeError("Cannot download {0} within retry limit {1}". + format(url, retry_limit)) + print "Cache file %s not found, downloading %s" % (filename, url) + r = requests.get(url, stream=True) + total_length = r.headers.get('content-length') + + if total_length is None: + with open(filename, 'w') as f: + shutil.copyfileobj(r.raw, f) + else: + with open(filename, 'w') as f: + dl = 0 + total_length = int(total_length) + for data in r.iter_content(chunk_size=4096): + dl += len(data) + f.write(data) + done = int(50 * dl / total_length) + sys.stdout.write("\r[%s%s]" % ('=' * done, + ' ' * (50 - done))) + sys.stdout.flush() + + return filename + + +def fetch_all(): + for module_name in filter(lambda x: not x.startswith("__"), + dir(paddle.dataset)): + if "fetch" in dir( + importlib.import_module("paddle.dataset.%s" % module_name)): + getattr( + importlib.import_module("paddle.dataset.%s" % module_name), + "fetch")() + + +def fetch_all_recordio(path): + for module_name in filter(lambda x: not x.startswith("__"), + dir(paddle.dataset)): + if "convert" in dir( + importlib.import_module("paddle.dataset.%s" % module_name)) and \ + not module_name == "common": + ds_path = os.path.join(path, module_name) + must_mkdirs(ds_path) + getattr( + importlib.import_module("paddle.dataset.%s" % module_name), + "convert")(ds_path) + + +def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump): + """ + you can call the function as: + + split(paddle.dataset.cifar.train10(), line_count=1000, + suffix="imikolov-train-%05d.pickle") + + the output files as: + + |-imikolov-train-00000.pickle + |-imikolov-train-00001.pickle + |- ... + |-imikolov-train-00480.pickle + + :param reader: is a reader creator + :param line_count: line count for each file + :param suffix: the suffix for the output files, should contain "%d" + means the id for each file. Default is "%05d.pickle" + :param dumper: is a callable function that dump object to file, this + function will be called as dumper(obj, f) and obj is the object + will be dumped, f is a file object. Default is cPickle.dump. 
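+
+    Note: the chunk files are written relative to the current working
+    directory unless ``suffix`` includes a directory component.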
+ """ + if not callable(dumper): + raise TypeError("dumper should be callable.") + lines = [] + indx_f = 0 + for i, d in enumerate(reader()): + lines.append(d) + if i >= line_count and i % line_count == 0: + with open(suffix % indx_f, "w") as f: + dumper(lines, f) + lines = [] + indx_f += 1 + if lines: + with open(suffix % indx_f, "w") as f: + dumper(lines, f) + + +def cluster_files_reader(files_pattern, + trainer_count, + trainer_id, + loader=cPickle.load): + """ + Create a reader that yield element from the given files, select + a file set according trainer count and trainer_id + + :param files_pattern: the files which generating by split(...) + :param trainer_count: total trainer count + :param trainer_id: the trainer rank id + :param loader: is a callable function that load object from file, this + function will be called as loader(f) and f is a file object. + Default is cPickle.load + """ + + def reader(): + if not callable(loader): + raise TypeError("loader should be callable.") + file_list = glob.glob(files_pattern) + file_list.sort() + my_file_list = [] + for idx, fn in enumerate(file_list): + if idx % trainer_count == trainer_id: + print "append file: %s" % fn + my_file_list.append(fn) + for fn in my_file_list: + with open(fn, "r") as f: + lines = loader(f) + for line in lines: + yield line + + return reader + + +def convert(output_path, reader, line_count, name_prefix): + import recordio + """ + Convert data from reader to recordio format files. + + :param output_path: directory in which output files will be saved. + :param reader: a data reader, from which the convert program will read + data instances. + :param name_prefix: the name prefix of generated files. + :param max_lines_to_shuffle: the max lines numbers to shuffle before + writing. + """ + + assert line_count >= 1 + indx_f = 0 + + def write_data(indx_f, lines): + filename = "%s/%s-%05d" % (output_path, name_prefix, indx_f) + writer = recordio.writer(filename) + for l in lines: + # FIXME(Yancey1989): + # dumps with protocol: pickle.HIGHEST_PROTOCOL + writer.write(cPickle.dumps(l)) + writer.close() + + lines = [] + for i, d in enumerate(reader()): + lines.append(d) + if i % line_count == 0 and i >= line_count: + write_data(indx_f, lines) + lines = [] + indx_f += 1 + continue + + write_data(indx_f, lines) diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py new file mode 100644 index 0000000000000000000000000000000000000000..4e94ce89892f8e6822c15fdc510805e75dfca988 --- /dev/null +++ b/python/paddle/dataset/conll05.py @@ -0,0 +1,254 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Conll05 dataset. +Paddle semantic role labeling Book and demo use this dataset as an example. +Because Conll05 is not free in public, the default downloaded URL is test set +of Conll05 (which is public). Users can change URL and MD5 to their Conll +dataset. And a pre-trained word vector model based on Wikipedia corpus is used +to initialize SRL model. 
+""" + +import tarfile +import gzip +import itertools +import paddle.dataset.common + +__all__ = ['test, get_dict', 'get_embedding', 'convert'] + +DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz' +DATA_MD5 = '387719152ae52d60422c016e92a742fc' +WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt' +WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa' +VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt' +VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c' +TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt' +TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751' +EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb' +EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' + +UNK_IDX = 0 + + +def load_label_dict(filename): + d = dict() + tag_dict = set() + with open(filename, 'r') as f: + for i, line in enumerate(f): + line = line.strip() + if line.startswith("B-"): + tag_dict.add(line[2:]) + elif line.startswith("I-"): + tag_dict.add(line[2:]) + index = 0 + for tag in tag_dict: + d["B-" + tag] = index + index += 1 + d["I-" + tag] = index + index += 1 + d["O"] = index + return d + + +def load_dict(filename): + d = dict() + with open(filename, 'r') as f: + for i, line in enumerate(f): + d[line.strip()] = i + return d + + +def corpus_reader(data_path, words_name, props_name): + """ + Read one corpus. It returns an iterator. Each element of + this iterator is a tuple including sentence and labels. The sentence is + consist of a list of word IDs. The labels include a list of label IDs. + :return: a iterator of data. + :rtype: iterator + """ + + def reader(): + tf = tarfile.open(data_path) + wf = tf.extractfile(words_name) + pf = tf.extractfile(props_name) + with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile( + fileobj=pf) as props_file: + sentences = [] + labels = [] + one_seg = [] + for word, label in itertools.izip(words_file, props_file): + word = word.strip() + label = label.strip().split() + + if len(label) == 0: # end of sentence + for i in xrange(len(one_seg[0])): + a_kind_lable = [x[i] for x in one_seg] + labels.append(a_kind_lable) + + if len(labels) >= 1: + verb_list = [] + for x in labels[0]: + if x != '-': + verb_list.append(x) + + for i, lbl in enumerate(labels[1:]): + cur_tag = 'O' + is_in_bracket = False + lbl_seq = [] + verb_word = '' + for l in lbl: + if l == '*' and is_in_bracket == False: + lbl_seq.append('O') + elif l == '*' and is_in_bracket == True: + lbl_seq.append('I-' + cur_tag) + elif l == '*)': + lbl_seq.append('I-' + cur_tag) + is_in_bracket = False + elif l.find('(') != -1 and l.find(')') != -1: + cur_tag = l[1:l.find('*')] + lbl_seq.append('B-' + cur_tag) + is_in_bracket = False + elif l.find('(') != -1 and l.find(')') == -1: + cur_tag = l[1:l.find('*')] + lbl_seq.append('B-' + cur_tag) + is_in_bracket = True + else: + raise RuntimeError('Unexpected label: %s' % + l) + + yield sentences, verb_list[i], lbl_seq + + sentences = [] + labels = [] + one_seg = [] + else: + sentences.append(word) + one_seg.append(label) + + pf.close() + wf.close() + tf.close() + + return reader + + +def reader_creator(corpus_reader, + word_dict=None, + predicate_dict=None, + label_dict=None): + def reader(): + for sentence, predicate, labels in corpus_reader(): + + sen_len = len(sentence) + + verb_index = labels.index('B-V') + mark = [0] * len(labels) + if verb_index > 0: + mark[verb_index - 1] = 1 + ctx_n1 = sentence[verb_index - 1] + else: + 
ctx_n1 = 'bos' + + if verb_index > 1: + mark[verb_index - 2] = 1 + ctx_n2 = sentence[verb_index - 2] + else: + ctx_n2 = 'bos' + + mark[verb_index] = 1 + ctx_0 = sentence[verb_index] + + if verb_index < len(labels) - 1: + mark[verb_index + 1] = 1 + ctx_p1 = sentence[verb_index + 1] + else: + ctx_p1 = 'eos' + + if verb_index < len(labels) - 2: + mark[verb_index + 2] = 1 + ctx_p2 = sentence[verb_index + 2] + else: + ctx_p2 = 'eos' + + word_idx = [word_dict.get(w, UNK_IDX) for w in sentence] + + ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len + ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len + ctx_0_idx = [word_dict.get(ctx_0, UNK_IDX)] * sen_len + ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len + ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len + + pred_idx = [predicate_dict.get(predicate)] * sen_len + label_idx = [label_dict.get(w) for w in labels] + + yield word_idx, ctx_n2_idx, ctx_n1_idx, \ + ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx + + return reader + + +def get_dict(): + """ + Get the word, verb and label dictionary of Wikipedia corpus. + """ + word_dict = load_dict( + paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)) + verb_dict = load_dict( + paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) + label_dict = load_label_dict( + paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) + return word_dict, verb_dict, label_dict + + +def get_embedding(): + """ + Get the trained word vector based on Wikipedia corpus. + """ + return paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5) + + +def test(): + """ + Conll05 test set creator. + + Because the training dataset is not free, the test dataset is used for + training. It returns a reader creator, each sample in the reader is nine + features, including sentence sequence, predicate, predicate context, + predicate context flag and tagged sequence. + + :return: Training reader creator + :rtype: callable + """ + word_dict, verb_dict, label_dict = get_dict() + reader = corpus_reader( + paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5), + words_name='conll05st-release/test.wsj/words/test.wsj.words.gz', + props_name='conll05st-release/test.wsj/props/test.wsj.props.gz') + return reader_creator(reader, word_dict, verb_dict, label_dict) + + +def fetch(): + paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5) + paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5) + paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5) + paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5) + paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, test(), 1000, "conl105_train") + paddle.dataset.common.convert(path, test(), 1000, "conl105_test") diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py new file mode 100644 index 0000000000000000000000000000000000000000..f082e33be3357fbe405ab1a1ef5e0e601108a363 --- /dev/null +++ b/python/paddle/dataset/flowers.py @@ -0,0 +1,199 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module will download dataset from +http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html +and parse train/test set intopaddle reader creators. + +This set contains images of flowers belonging to 102 different categories. +The images were acquired by searching the web and taking pictures. There are a +minimum of 40 images for each category. + +The database was used in: + +Nilsback, M-E. and Zisserman, A. Automated flower classification over a large + number of classes.Proceedings of the Indian Conference on Computer Vision, +Graphics and Image Processing (2008) +http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}. + +""" +import cPickle +import itertools +import functools +from common import download +import tarfile +import scipy.io as scio +from paddle.dataset.image import * +from paddle.reader import * +import os +import numpy as np +from multiprocessing import cpu_count +__all__ = ['train', 'test', 'valid'] + +DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz' +LABEL_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat' +SETID_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat' +DATA_MD5 = '33bfc11892f1e405ca193ae9a9f2a118' +LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d' +SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c' +# In official 'readme', tstid is the flag of test data +# and trnid is the flag of train data. But test data is more than train data. +# So we exchange the train data and test data. +TRAIN_FLAG = 'tstid' +TEST_FLAG = 'trnid' +VALID_FLAG = 'valid' + + +def default_mapper(is_train, sample): + ''' + map image bytes data to type needed by model input layer + ''' + img, label = sample + img = load_image_bytes(img) + img = simple_transform( + img, 256, 224, is_train, mean=[103.94, 116.78, 123.68]) + return img.flatten().astype('float32'), label + + +train_mapper = functools.partial(default_mapper, True) +test_mapper = functools.partial(default_mapper, False) + + +def reader_creator(data_file, + label_file, + setid_file, + dataset_name, + mapper, + buffered_size=1024, + use_xmap=True): + ''' + 1. read images from tar file and + merge images into batch files in 102flowers.tgz_batch/ + 2. 
get a reader to read sample from batch file + + :param data_file: downloaded data file + :type data_file: string + :param label_file: downloaded label file + :type label_file: string + :param setid_file: downloaded setid file containing information + about how to split dataset + :type setid_file: string + :param dataset_name: data set name (tstid|trnid|valid) + :type dataset_name: string + :param mapper: a function to map image bytes data to type + needed by model input layer + :type mapper: callable + :param buffered_size: the size of buffer used to process images + :type buffered_size: int + :return: data reader + :rtype: callable + ''' + labels = scio.loadmat(label_file)['labels'][0] + indexes = scio.loadmat(setid_file)[dataset_name][0] + img2label = {} + for i in indexes: + img = "jpg/image_%05d.jpg" % i + img2label[img] = labels[i - 1] + file_list = batch_images_from_tar(data_file, dataset_name, img2label) + + def reader(): + for file in open(file_list): + file = file.strip() + batch = None + with open(file, 'r') as f: + batch = cPickle.load(f) + data = batch['data'] + labels = batch['label'] + for sample, label in itertools.izip(data, batch['label']): + yield sample, int(label) - 1 + + if use_xmap: + return xmap_readers(mapper, reader, cpu_count(), buffered_size) + else: + return map_readers(mapper, reader) + + +def train(mapper=train_mapper, buffered_size=1024, use_xmap=True): + ''' + Create flowers training set reader. + It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] + translated from original color image by steps: + 1. resize to 256*256 + 2. random crop to 224*224 + 3. flatten + :param mapper: a function to map sample. + :type mapper: callable + :param buffered_size: the size of buffer used to process images + :type buffered_size: int + :return: train data reader + :rtype: callable + ''' + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), TRAIN_FLAG, mapper, + buffered_size, use_xmap) + + +def test(mapper=test_mapper, buffered_size=1024, use_xmap=True): + ''' + Create flowers test set reader. + It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] + translated from original color image by steps: + 1. resize to 256*256 + 2. random crop to 224*224 + 3. flatten + :param mapper: a function to map sample. + :type mapper: callable + :param buffered_size: the size of buffer used to process images + :type buffered_size: int + :return: test data reader + :rtype: callable + ''' + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), TEST_FLAG, mapper, + buffered_size, use_xmap) + + +def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True): + ''' + Create flowers validation set reader. + It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] + translated from original color image by steps: + 1. resize to 256*256 + 2. random crop to 224*224 + 3. flatten + :param mapper: a function to map sample. 
+ :type mapper: callable + :param buffered_size: the size of buffer used to process images + :type buffered_size: int + :return: test data reader + :rtype: callable + ''' + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), VALID_FLAG, mapper, + buffered_size, use_xmap) + + +def fetch(): + download(DATA_URL, 'flowers', DATA_MD5) + download(LABEL_URL, 'flowers', LABEL_MD5) + download(SETID_URL, 'flowers', SETID_MD5) diff --git a/python/paddle/dataset/image.py b/python/paddle/dataset/image.py new file mode 100644 index 0000000000000000000000000000000000000000..9235c41e9eb95b25a0dc53a494a203e7a4525981 --- /dev/null +++ b/python/paddle/dataset/image.py @@ -0,0 +1,381 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This file contains some common interfaces for image preprocess. +Many users are confused about the image layout. We introduce +the image layout as follows. + +- CHW Layout + + - The abbreviations: C=channel, H=Height, W=Width + - The default layout of image opened by cv2 or PIL is HWC. + PaddlePaddle only supports the CHW layout. And CHW is simply + a transpose of HWC. It must transpose the input image. + +- Color format: RGB or BGR + + OpenCV use BGR color format. PIL use RGB color format. Both + formats can be used for training. Noted that, the format should + be keep consistent between the training and inference peroid. +""" +import numpy as np +try: + import cv2 +except ImportError: + cv2 = None +import os +import tarfile +import cPickle + +__all__ = [ + "load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop", + "random_crop", "left_right_flip", "simple_transform", "load_and_transform", + "batch_images_from_tar" +] + + +def batch_images_from_tar(data_file, + dataset_name, + img2label, + num_per_batch=1024): + """ + Read images from tar file and batch them into batch file. 
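+    If the batch directory for this tar file already exists, the cached
+    meta file is returned and the tar file is not read again.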
+ + :param data_file: path of image tar file + :type data_file: string + :param dataset_name: 'train','test' or 'valid' + :type dataset_name: string + :param img2label: a dic with image file name as key + and image's label as value + :type img2label: dic + :param num_per_batch: image number per batch file + :type num_per_batch: int + :return: path of list file containing paths of batch file + :rtype: string + """ + batch_dir = data_file + "_batch" + out_path = "%s/%s" % (batch_dir, dataset_name) + meta_file = "%s/%s.txt" % (batch_dir, dataset_name) + + if os.path.exists(out_path): + return meta_file + else: + os.makedirs(out_path) + + tf = tarfile.open(data_file) + mems = tf.getmembers() + data = [] + labels = [] + file_id = 0 + for mem in mems: + if mem.name in img2label: + data.append(tf.extractfile(mem).read()) + labels.append(img2label[mem.name]) + if len(data) == num_per_batch: + output = {} + output['label'] = labels + output['data'] = data + cPickle.dump( + output, + open('%s/batch_%d' % (out_path, file_id), 'w'), + protocol=cPickle.HIGHEST_PROTOCOL) + file_id += 1 + data = [] + labels = [] + if len(data) > 0: + output = {} + output['label'] = labels + output['data'] = data + cPickle.dump( + output, + open('%s/batch_%d' % (out_path, file_id), 'w'), + protocol=cPickle.HIGHEST_PROTOCOL) + + with open(meta_file, 'a') as meta: + for file in os.listdir(out_path): + meta.write(os.path.abspath("%s/%s" % (out_path, file)) + "\n") + return meta_file + + +def load_image_bytes(bytes, is_color=True): + """ + Load an color or gray image from bytes array. + + Example usage: + + .. code-block:: python + + with open('cat.jpg') as f: + im = load_image_bytes(f.read()) + + :param bytes: the input image bytes array. + :type bytes: str + :param is_color: If set is_color True, it will load and + return a color image. Otherwise, it will + load and return a gray image. + :type is_color: bool + """ + flag = 1 if is_color else 0 + file_bytes = np.asarray(bytearray(bytes), dtype=np.uint8) + img = cv2.imdecode(file_bytes, flag) + return img + + +def load_image(file, is_color=True): + """ + Load an color or gray image from the file path. + + Example usage: + + .. code-block:: python + + im = load_image('cat.jpg') + + :param file: the input image path. + :type file: string + :param is_color: If set is_color True, it will load and + return a color image. Otherwise, it will + load and return a gray image. + :type is_color: bool + """ + # cv2.IMAGE_COLOR for OpenCV3 + # cv2.CV_LOAD_IMAGE_COLOR for older OpenCV Version + # cv2.IMAGE_GRAYSCALE for OpenCV3 + # cv2.CV_LOAD_IMAGE_GRAYSCALE for older OpenCV Version + # Here, use constant 1 and 0 + # 1: COLOR, 0: GRAYSCALE + flag = 1 if is_color else 0 + im = cv2.imread(file, flag) + return im + + +def resize_short(im, size): + """ + Resize an image so that the length of shorter edge is size. + + Example usage: + + .. code-block:: python + + im = load_image('cat.jpg') + im = resize_short(im, 256) + + :param im: the input image with HWC layout. + :type im: ndarray + :param size: the shorter edge size of image after resizing. + :type size: int + """ + h, w = im.shape[:2] + h_new, w_new = size, size + if h > w: + h_new = size * h / w + else: + w_new = size * w / h + im = cv2.resize(im, (h_new, w_new), interpolation=cv2.INTER_CUBIC) + return im + + +def to_chw(im, order=(2, 0, 1)): + """ + Transpose the input image order. The image layout is HWC format + opened by cv2 or PIL. Transpose the input image to CHW layout + according the order (2,0,1). + + Example usage: + + .. 
code-block:: python + + im = load_image('cat.jpg') + im = resize_short(im, 256) + im = to_chw(im) + + :param im: the input image with HWC layout. + :type im: ndarray + :param order: the transposed order. + :type order: tuple|list + """ + assert len(im.shape) == len(order) + im = im.transpose(order) + return im + + +def center_crop(im, size, is_color=True): + """ + Crop the center of image with size. + + Example usage: + + .. code-block:: python + + im = center_crop(im, 224) + + :param im: the input image with HWC layout. + :type im: ndarray + :param size: the cropping size. + :type size: int + :param is_color: whether the image is color or not. + :type is_color: bool + """ + h, w = im.shape[:2] + h_start = (h - size) / 2 + w_start = (w - size) / 2 + h_end, w_end = h_start + size, w_start + size + if is_color: + im = im[h_start:h_end, w_start:w_end, :] + else: + im = im[h_start:h_end, w_start:w_end] + return im + + +def random_crop(im, size, is_color=True): + """ + Randomly crop input image with size. + + Example usage: + + .. code-block:: python + + im = random_crop(im, 224) + + :param im: the input image with HWC layout. + :type im: ndarray + :param size: the cropping size. + :type size: int + :param is_color: whether the image is color or not. + :type is_color: bool + """ + h, w = im.shape[:2] + h_start = np.random.randint(0, h - size + 1) + w_start = np.random.randint(0, w - size + 1) + h_end, w_end = h_start + size, w_start + size + if is_color: + im = im[h_start:h_end, w_start:w_end, :] + else: + im = im[h_start:h_end, w_start:w_end] + return im + + +def left_right_flip(im, is_color=True): + """ + Flip an image along the horizontal direction. + Return the flipped image. + + Example usage: + + .. code-block:: python + + im = left_right_flip(im) + + :param im: input image with HWC layout or HW layout for gray image + :type im: ndarray + :param is_color: whether input image is color or not + :type is_color: bool + """ + if len(im.shape) == 3 and is_color: + return im[:, ::-1, :] + else: + return im[:, ::-1] + + +def simple_transform(im, + resize_size, + crop_size, + is_train, + is_color=True, + mean=None): + """ + Simply data argumentation for training. These operations include + resizing, croping and flipping. + + Example usage: + + .. code-block:: python + + im = simple_transform(im, 256, 224, True) + + :param im: The input image with HWC layout. + :type im: ndarray + :param resize_size: The shorter edge length of the resized image. + :type resize_size: int + :param crop_size: The cropping size. + :type crop_size: int + :param is_train: Whether it is training or not. + :type is_train: bool + :param is_color: whether the image is color or not. + :type is_color: bool + :param mean: the mean values, which can be element-wise mean values or + mean values per channel. 
+    :type mean: numpy array | list
+    """
+    im = resize_short(im, resize_size)
+    if is_train:
+        im = random_crop(im, crop_size, is_color=is_color)
+        if np.random.randint(2) == 0:
+            im = left_right_flip(im, is_color)
+    else:
+        im = center_crop(im, crop_size, is_color=is_color)
+    if len(im.shape) == 3:
+        im = to_chw(im)
+
+    im = im.astype('float32')
+    if mean is not None:
+        mean = np.array(mean, dtype=np.float32)
+        # mean value, may be one value per channel
+        if mean.ndim == 1 and is_color:
+            mean = mean[:, np.newaxis, np.newaxis]
+        elif mean.ndim == 1:
+            # gray image: the one-value mean broadcasts as-is
+            pass
+        else:
+            # elementwise mean
+            assert len(mean.shape) == len(im.shape)
+        im -= mean
+
+    return im
+
+
+def load_and_transform(filename,
+                       resize_size,
+                       crop_size,
+                       is_train,
+                       is_color=True,
+                       mean=None):
+    """
+    Load image from the input file `filename` and transform image for
+    data augmentation. Please refer to the `simple_transform` interface
+    for the transform operations.
+
+    Example usage:
+
+    .. code-block:: python
+
+        im = load_and_transform('cat.jpg', 256, 224, True)
+
+    :param filename: The file name of input image.
+    :type filename: string
+    :param resize_size: The shorter edge length of the resized image.
+    :type resize_size: int
+    :param crop_size: The cropping size.
+    :type crop_size: int
+    :param is_train: Whether it is training or not.
+    :type is_train: bool
+    :param is_color: whether the image is color or not.
+    :type is_color: bool
+    :param mean: the mean values, which can be element-wise mean values or
+                 mean values per channel.
+    :type mean: numpy array | list
+    """
+    im = load_image(filename, is_color)
+    im = simple_transform(im, resize_size, crop_size, is_train, is_color, mean)
+    return im
diff --git a/python/paddle/dataset/imdb.py b/python/paddle/dataset/imdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ff05b1e9b7f4c42909370a21beb140ecdcd6868
--- /dev/null
+++ b/python/paddle/dataset/imdb.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+IMDB dataset.
+
+This module downloads the IMDB dataset from
+http://ai.stanford.edu/%7Eamaas/data/sentiment/. This dataset contains a set
+of 25,000 highly polar movie reviews for training, and 25,000 for testing.
+It also provides an API for building the word dictionary.
+"""
+
+import paddle.dataset.common
+import collections
+import tarfile
+import re
+import string
+
+__all__ = ['build_dict', 'train', 'test', 'convert']
+
+URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz'
+MD5 = '7c2ac02c03563afcf9b574c7e56c153a'
+
+
+def tokenize(pattern):
+    """
+    Read files that match the given pattern. Tokenize and yield each file.
+    """
+
+    with tarfile.open(paddle.dataset.common.download(URL, 'imdb', MD5)) as tarf:
+        # Note that we should use tarfile.next(), which does
+        # sequential access of member files, rather than
+        # tarfile.extractfile, which does random access and might
+        # destroy hard disks.
+ tf = tarf.next() + while tf != None: + if bool(pattern.match(tf.name)): + # newline and punctuations removal and ad-hoc tokenization. + yield tarf.extractfile(tf).read().rstrip("\n\r").translate( + None, string.punctuation).lower().split() + tf = tarf.next() + + +def build_dict(pattern, cutoff): + """ + Build a word dictionary from the corpus. Keys of the dictionary are words, + and values are zero-based IDs of these words. + """ + word_freq = collections.defaultdict(int) + for doc in tokenize(pattern): + for word in doc: + word_freq[word] += 1 + + # Not sure if we should prune less-frequent words here. + word_freq = filter(lambda x: x[1] > cutoff, word_freq.items()) + + dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*dictionary)) + word_idx = dict(zip(words, xrange(len(words)))) + word_idx[''] = len(words) + return word_idx + + +def reader_creator(pos_pattern, neg_pattern, word_idx): + UNK = word_idx[''] + INS = [] + + def load(pattern, out, label): + for doc in tokenize(pattern): + out.append(([word_idx.get(w, UNK) for w in doc], label)) + + load(pos_pattern, INS, 0) + load(neg_pattern, INS, 1) + + def reader(): + for doc, label in INS: + yield doc, label + + return reader + + +def train(word_idx): + """ + IMDB training set creator. + + It returns a reader creator, each sample in the reader is an zero-based ID + sequence and label in [0, 1]. + + :param word_idx: word dictionary + :type word_idx: dict + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + re.compile("aclImdb/train/pos/.*\.txt$"), + re.compile("aclImdb/train/neg/.*\.txt$"), word_idx) + + +def test(word_idx): + """ + IMDB test set creator. + + It returns a reader creator, each sample in the reader is an zero-based ID + sequence and label in [0, 1]. + + :param word_idx: word dictionary + :type word_idx: dict + :return: Test reader creator + :rtype: callable + """ + return reader_creator( + re.compile("aclImdb/test/pos/.*\.txt$"), + re.compile("aclImdb/test/neg/.*\.txt$"), word_idx) + + +def word_dict(): + """ + Build a word dictionary from the corpus. + + :return: Word dictionary + :rtype: dict + """ + return build_dict( + re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150) + + +def fetch(): + paddle.dataset.common.download(URL, 'imdb', MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + w = word_dict() + paddle.dataset.common.convert(path, lambda: train(w), 1000, "imdb_train") + paddle.dataset.common.convert(path, lambda: test(w), 1000, "imdb_test") diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py new file mode 100644 index 0000000000000000000000000000000000000000..c6c0a0f54373dd068b2c493f6fbc9c8578593aef --- /dev/null +++ b/python/paddle/dataset/imikolov.py @@ -0,0 +1,160 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +imikolov's simple dataset. 
+ +This module will download dataset from +http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set +into paddle reader creators. +""" +import paddle.dataset.common +import collections +import tarfile + +__all__ = ['train', 'test', 'build_dict', 'convert'] + +URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' +MD5 = '30177ea32e27c525793142b6bf2c8e2d' + + +class DataType(object): + NGRAM = 1 + SEQ = 2 + + +def word_count(f, word_freq=None): + if word_freq is None: + word_freq = collections.defaultdict(int) + + for l in f: + for w in l.strip().split(): + word_freq[w] += 1 + word_freq[''] += 1 + word_freq[''] += 1 + + return word_freq + + +def build_dict(min_word_freq=50): + """ + Build a word dictionary from the corpus, Keys of the dictionary are words, + and values are zero-based IDs of these words. + """ + train_filename = './simple-examples/data/ptb.train.txt' + test_filename = './simple-examples/data/ptb.valid.txt' + with tarfile.open( + paddle.dataset.common.download(paddle.dataset.imikolov.URL, + 'imikolov', + paddle.dataset.imikolov.MD5)) as tf: + trainf = tf.extractfile(train_filename) + testf = tf.extractfile(test_filename) + word_freq = word_count(testf, word_count(trainf)) + if '' in word_freq: + # remove for now, since we will set it as last index + del word_freq[''] + + word_freq = filter(lambda x: x[1] > min_word_freq, word_freq.items()) + + word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*word_freq_sorted)) + word_idx = dict(zip(words, xrange(len(words)))) + word_idx[''] = len(words) + + return word_idx + + +def reader_creator(filename, word_idx, n, data_type): + def reader(): + with tarfile.open( + paddle.dataset.common.download( + paddle.dataset.imikolov.URL, 'imikolov', + paddle.dataset.imikolov.MD5)) as tf: + f = tf.extractfile(filename) + + UNK = word_idx[''] + for l in f: + if DataType.NGRAM == data_type: + assert n > -1, 'Invalid gram length' + l = [''] + l.strip().split() + [''] + if len(l) >= n: + l = [word_idx.get(w, UNK) for w in l] + for i in range(n, len(l) + 1): + yield tuple(l[i - n:i]) + elif DataType.SEQ == data_type: + l = l.strip().split() + l = [word_idx.get(w, UNK) for w in l] + src_seq = [word_idx['']] + l + trg_seq = l + [word_idx['']] + if n > 0 and len(src_seq) > n: continue + yield src_seq, trg_seq + else: + assert False, 'Unknow data type' + + return reader + + +def train(word_idx, n, data_type=DataType.NGRAM): + """ + imikolov training set creator. + + It returns a reader creator, each sample in the reader is a word ID + tuple. + + :param word_idx: word dictionary + :type word_idx: dict + :param n: sliding window size if type is ngram, otherwise max length of sequence + :type n: int + :param data_type: data type (ngram or sequence) + :type data_type: member variable of DataType (NGRAM or SEQ) + :return: Training reader creator + :rtype: callable + """ + return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n, + data_type) + + +def test(word_idx, n, data_type=DataType.NGRAM): + """ + imikolov test set creator. + + It returns a reader creator, each sample in the reader is a word ID + tuple. 
+ + :param word_idx: word dictionary + :type word_idx: dict + :param n: sliding window size if type is ngram, otherwise max length of sequence + :type n: int + :param data_type: data type (ngram or sequence) + :type data_type: member variable of DataType (NGRAM or SEQ) + :return: Test reader creator + :rtype: callable + """ + return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n, + data_type) + + +def fetch(): + paddle.dataset.common.download(URL, "imikolov", MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + N = 5 + word_dict = build_dict() + paddle.dataset.common.convert(path, + train(word_dict, N), 1000, "imikolov_train") + paddle.dataset.common.convert(path, + test(word_dict, N), 1000, "imikolov_test") diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1b8b5fac223c0d134cae69a61a0c2c00bc1feb --- /dev/null +++ b/python/paddle/dataset/mnist.py @@ -0,0 +1,122 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +MNIST dataset. + +This module will download dataset from http://yann.lecun.com/exdb/mnist/ and +parse training set and test set into paddle reader creators. +""" +import paddle.dataset.common +import subprocess +import numpy +import platform +__all__ = ['train', 'test', 'convert'] + +URL_PREFIX = 'http://yann.lecun.com/exdb/mnist/' +TEST_IMAGE_URL = URL_PREFIX + 't10k-images-idx3-ubyte.gz' +TEST_IMAGE_MD5 = '9fb629c4189551a2d022fa330f9573f3' +TEST_LABEL_URL = URL_PREFIX + 't10k-labels-idx1-ubyte.gz' +TEST_LABEL_MD5 = 'ec29112dd5afa0611ce80d1b7f02629c' +TRAIN_IMAGE_URL = URL_PREFIX + 'train-images-idx3-ubyte.gz' +TRAIN_IMAGE_MD5 = 'f68b3c2dcbeaaa9fbdd348bbdeb94873' +TRAIN_LABEL_URL = URL_PREFIX + 'train-labels-idx1-ubyte.gz' +TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432' + + +def reader_creator(image_filename, label_filename, buffer_size): + def reader(): + if platform.system() == 'Darwin': + zcat_cmd = 'gzcat' + elif platform.system() == 'Linux': + zcat_cmd = 'zcat' + else: + raise NotImplementedError() + + # According to http://stackoverflow.com/a/38061619/724872, we + # cannot use standard package gzip here. + m = subprocess.Popen([zcat_cmd, image_filename], stdout=subprocess.PIPE) + m.stdout.read(16) # skip some magic bytes + + l = subprocess.Popen([zcat_cmd, label_filename], stdout=subprocess.PIPE) + l.stdout.read(8) # skip some magic bytes + + try: # reader could be break. + while True: + labels = numpy.fromfile( + l.stdout, 'ubyte', count=buffer_size).astype("int") + + if labels.size != buffer_size: + break # numpy.fromfile returns empty slice after EOF. 
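+                # Each block of buffer_size labels read above is followed by
+                # the matching 28x28 images; the raw ubyte pixels are rescaled
+                # below from [0, 255] to [-1, 1] via images / 255.0 * 2.0 - 1.0.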
+ + images = numpy.fromfile( + m.stdout, 'ubyte', count=buffer_size * 28 * 28).reshape( + (buffer_size, 28 * 28)).astype('float32') + + images = images / 255.0 * 2.0 - 1.0 + + for i in xrange(buffer_size): + yield images[i, :], int(labels[i]) + finally: + m.terminate() + l.terminate() + + return reader + + +def train(): + """ + MNIST training set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', + TRAIN_IMAGE_MD5), + paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', + TRAIN_LABEL_MD5), 100) + + +def test(): + """ + MNIST test set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Test reader creator. + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5), + paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5), + 100) + + +def fetch(): + paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5) + paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) + paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5) + paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, train(), 1000, "minist_train") + paddle.dataset.common.convert(path, test(), 1000, "minist_test") diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py new file mode 100644 index 0000000000000000000000000000000000000000..ab11716202a8298c182e23b661eb1d2ac74bf4da --- /dev/null +++ b/python/paddle/dataset/movielens.py @@ -0,0 +1,262 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Movielens 1-M dataset. + +Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000 +movies, which was collected by GroupLens Research. This module will download +Movielens 1-M dataset from +http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training +set and test set into paddle reader creators. + +""" + +import zipfile +import paddle.dataset.common +import re +import random +import functools + +__all__ = [ + 'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id', + 'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info', + 'convert' +] + +age_table = [1, 18, 25, 35, 45, 50, 56] + +URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip' +MD5 = 'c4d9eecfca2ab87c1945afe126590906' + + +class MovieInfo(object): + """ + Movie id, title and categories information are stored in MovieInfo. 
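+
+    Example usage (values are illustrative only, not taken from the dataset):
+
+    .. code-block:: python
+
+        mov = MovieInfo(index=1, categories=['Animation', 'Comedy'],
+                        title='Toy Story')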
+ """ + + def __init__(self, index, categories, title): + self.index = int(index) + self.categories = categories + self.title = title + + def value(self): + """ + Get information from a movie. + """ + return [ + self.index, [CATEGORIES_DICT[c] for c in self.categories], + [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()] + ] + + def __str__(self): + return "" % ( + self.index, self.title, self.categories) + + def __repr__(self): + return self.__str__() + + +class UserInfo(object): + """ + User id, gender, age, and job information are stored in UserInfo. + """ + + def __init__(self, index, gender, age, job_id): + self.index = int(index) + self.is_male = gender == 'M' + self.age = age_table.index(int(age)) + self.job_id = int(job_id) + + def value(self): + """ + Get information from a user. + """ + return [self.index, 0 if self.is_male else 1, self.age, self.job_id] + + def __str__(self): + return "" % ( + self.index, "M" + if self.is_male else "F", age_table[self.age], self.job_id) + + def __repr__(self): + return str(self) + + +MOVIE_INFO = None +MOVIE_TITLE_DICT = None +CATEGORIES_DICT = None +USER_INFO = None + + +def __initialize_meta_info__(): + fn = paddle.dataset.common.download(URL, "movielens", MD5) + global MOVIE_INFO + if MOVIE_INFO is None: + pattern = re.compile(r'^(.*)\((\d+)\)$') + with zipfile.ZipFile(file=fn) as package: + for info in package.infolist(): + assert isinstance(info, zipfile.ZipInfo) + MOVIE_INFO = dict() + title_word_set = set() + categories_set = set() + with package.open('ml-1m/movies.dat') as movie_file: + for i, line in enumerate(movie_file): + movie_id, title, categories = line.strip().split('::') + categories = categories.split('|') + for c in categories: + categories_set.add(c) + title = pattern.match(title).group(1) + MOVIE_INFO[int(movie_id)] = MovieInfo( + index=movie_id, categories=categories, title=title) + for w in title.split(): + title_word_set.add(w.lower()) + + global MOVIE_TITLE_DICT + MOVIE_TITLE_DICT = dict() + for i, w in enumerate(title_word_set): + MOVIE_TITLE_DICT[w] = i + + global CATEGORIES_DICT + CATEGORIES_DICT = dict() + for i, c in enumerate(categories_set): + CATEGORIES_DICT[c] = i + + global USER_INFO + USER_INFO = dict() + with package.open('ml-1m/users.dat') as user_file: + for line in user_file: + uid, gender, age, job, _ = line.strip().split("::") + USER_INFO[int(uid)] = UserInfo( + index=uid, gender=gender, age=age, job_id=job) + return fn + + +def __reader__(rand_seed=0, test_ratio=0.1, is_test=False): + fn = __initialize_meta_info__() + rand = random.Random(x=rand_seed) + with zipfile.ZipFile(file=fn) as package: + with package.open('ml-1m/ratings.dat') as rating: + for line in rating: + if (rand.random() < test_ratio) == is_test: + uid, mov_id, rating, _ = line.strip().split("::") + uid = int(uid) + mov_id = int(mov_id) + rating = float(rating) * 2 - 5.0 + + mov = MOVIE_INFO[mov_id] + usr = USER_INFO[uid] + yield usr.value() + mov.value() + [[rating]] + + +def __reader_creator__(**kwargs): + return lambda: __reader__(**kwargs) + + +train = functools.partial(__reader_creator__, is_test=False) +test = functools.partial(__reader_creator__, is_test=True) + + +def get_movie_title_dict(): + """ + Get movie title dictionary. + """ + __initialize_meta_info__() + return MOVIE_TITLE_DICT + + +def __max_index_info__(a, b): + if a.index > b.index: + return a + else: + return b + + +def max_movie_id(): + """ + Get the maximum value of movie id. 
+ """ + __initialize_meta_info__() + return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index + + +def max_user_id(): + """ + Get the maximum value of user id. + """ + __initialize_meta_info__() + return reduce(__max_index_info__, USER_INFO.viewvalues()).index + + +def __max_job_id_impl__(a, b): + if a.job_id > b.job_id: + return a + else: + return b + + +def max_job_id(): + """ + Get the maximum value of job id. + """ + __initialize_meta_info__() + return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id + + +def movie_categories(): + """ + Get movie categoriges dictionary. + """ + __initialize_meta_info__() + return CATEGORIES_DICT + + +def user_info(): + """ + Get user info dictionary. + """ + __initialize_meta_info__() + return USER_INFO + + +def movie_info(): + """ + Get movie info dictionary. + """ + __initialize_meta_info__() + return MOVIE_INFO + + +def unittest(): + for train_count, _ in enumerate(train()()): + pass + for test_count, _ in enumerate(test()()): + pass + + print train_count, test_count + + +def fetch(): + paddle.dataset.common.download(URL, "movielens", MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, train(), 1000, "movielens_train") + paddle.dataset.common.convert(path, test(), 1000, "movielens_test") + + +if __name__ == '__main__': + unittest() diff --git a/python/paddle/dataset/mq2007.py b/python/paddle/dataset/mq2007.py new file mode 100644 index 0000000000000000000000000000000000000000..d3b3dd524c34be660c5f2d4fc5ce2fa0420efbc1 --- /dev/null +++ b/python/paddle/dataset/mq2007.py @@ -0,0 +1,333 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +MQ2007 dataset + +MQ2007 is a query set from Million Query track of TREC 2007. There are about 1700 queries in it with labeled documents. In MQ2007, the 5-fold cross +validation strategy is adopted and the 5-fold partitions are included in the package. In each fold, there are three subsets for learning: training set, +validation set and testing set. + +MQ2007 dataset from website +http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar and parse training set and test set into paddle reader creators + +""" + +import os +import functools +import rarfile +from common import download +import numpy as np + +# URL = "http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar" +URL = "http://www.bigdatalab.ac.cn/benchmark/upload/download_source/7b6dbbe2-842c-11e4-a536-bcaec51b9163_MQ2007.rar" +MD5 = "7be1640ae95c6408dab0ae7207bdc706" + + +def __initialize_meta_info__(): + """ + download and extract the MQ2007 dataset + """ + fn = fetch() + rar = rarfile.RarFile(fn) + dirpath = os.path.dirname(fn) + rar.extractall(path=dirpath) + return dirpath + + +class Query(object): + """ + queries used for learning to rank algorithms. 
It is created from relevance scores, query-document feature vectors + + Parameters: + ---------- + query_id : int + query_id in dataset, mapping from query to relevance documents + relevance_score : int + relevance score of query and document pair + feature_vector : array, dense feature + feature in vector format + description : string + comment section in query doc pair data + """ + + def __init__(self, + query_id=-1, + relevance_score=-1, + feature_vector=None, + description=""): + self.query_id = query_id + self.relevance_score = relevance_score + if feature_vector is None: + self.feature_vector = [] + else: + self.feature_vector = feature_vector + self.description = description + + def __str__(self): + string = "%s %s %s" % (str(self.relevance_score), str(self.query_id), + " ".join(str(f) for f in self.feature_vector)) + return string + + # @classmethod + def _parse_(self, text): + """ + parse line into Query + """ + comment_position = text.find('#') + line = text[:comment_position].strip() + self.description = text[comment_position + 1:].strip() + parts = line.split() + if len(parts) != 48: + sys.stdout.write("expect 48 space split parts, get %d" % + (len(parts))) + return None + # format : 0 qid:10 1:0.000272 2:0.000000 .... + self.relevance_score = int(parts[0]) + self.query_id = int(parts[1].split(':')[1]) + for p in parts[2:]: + pair = p.split(':') + self.feature_vector.append(float(pair[1])) + return self + + +class QueryList(object): + """ + group query into list, every item in list is a Query + """ + + def __init__(self, querylist=None): + self.query_id = -1 + if querylist is None: + self.querylist = [] + else: + self.querylist = querylist + for query in self.querylist: + if self.query_id == -1: + self.query_id = query.query_id + else: + if self.query_id != query.query_id: + raise ValueError("query in list must be same query_id") + + def __iter__(self): + for query in self.querylist: + yield query + + def __len__(self): + return len(self.querylist) + + def __getitem__(self, i): + return self.querylist[i] + + def _correct_ranking_(self): + if self.querylist is None: + return + self.querylist.sort(key=lambda x: x.relevance_score, reverse=True) + + def _add_query(self, query): + if self.query_id == -1: + self.query_id = query.query_id + else: + if self.query_id != query.query_id: + raise ValueError("query in list must be same query_id") + self.querylist.append(query) + + +def gen_plain_txt(querylist): + """ + gen plain text in list for other usage + Paramters: + -------- + querylist : querylist, one query match many docment pairs in list, see QueryList + + return : + ------ + query_id : np.array, shape=(samples_num, ) + label : np.array, shape=(samples_num, ) + querylist : np.array, shape=(samples_num, feature_dimension) + """ + if not isinstance(querylist, QueryList): + querylist = QueryList(querylist) + querylist._correct_ranking_() + for query in querylist: + yield querylist.query_id, query.relevance_score, np.array( + query.feature_vector) + + +def gen_point(querylist): + """ + gen item in list for point-wise learning to rank algorithm + Paramters: + -------- + querylist : querylist, one query match many docment pairs in list, see QueryList + + return : + ------ + label : np.array, shape=(samples_num, ) + querylist : np.array, shape=(samples_num, feature_dimension) + """ + if not isinstance(querylist, QueryList): + querylist = QueryList(querylist) + querylist._correct_ranking_() + for query in querylist: + yield query.relevance_score, np.array(query.feature_vector) + + +def 
gen_pair(querylist, partial_order="full"): + """ + gen pair for pair-wise learning to rank algorithm + Paramters: + -------- + querylist : querylist, one query match many docment pairs in list, see QueryList + pairtial_order : "full" or "neighbour" + there is redudant in all possiable pair combinations, which can be simplifed + gen pairs for neighbour items or the full partial order pairs + + return : + ------ + label : np.array, shape=(1) + query_left : np.array, shape=(1, feature_dimension) + query_right : same as left + """ + if not isinstance(querylist, QueryList): + querylist = QueryList(querylist) + querylist._correct_ranking_() + labels = [] + docpairs = [] + + # C(n,2) + for i in range(len(querylist)): + query_left = querylist[i] + for j in range(i + 1, len(querylist)): + query_right = querylist[j] + if query_left.relevance_score > query_right.relevance_score: + labels.append([1]) + docpairs.append([ + np.array(query_left.feature_vector), + np.array(query_right.feature_vector) + ]) + elif query_left.relevance_score < query_right.relevance_score: + labels.append([1]) + docpairs.append([ + np.array(query_right.feature_vector), + np.array(query_left.feature_vector) + ]) + for label, pair in zip(labels, docpairs): + yield np.array(label), pair[0], pair[1] + + +def gen_list(querylist): + """ + gen item in list for list-wise learning to rank algorithm + Paramters: + -------- + querylist : querylist, one query match many docment pairs in list, see QueryList + + return : + ------ + label : np.array, shape=(samples_num, ) + querylist : np.array, shape=(samples_num, feature_dimension) + """ + if not isinstance(querylist, QueryList): + querylist = QueryList(querylist) + querylist._correct_ranking_() + relevance_score_list = [[query.relevance_score] for query in querylist] + feature_vector_list = [query.feature_vector for query in querylist] + yield np.array(relevance_score_list), np.array(feature_vector_list) + + +def query_filter(querylists): + """ + filter query get only document with label 0. + label 0, 1, 2 means the relevance score document with query + parameters : + querylist : QueyList list + + return : + querylist : QueyList list + """ + filter_query = [] + for querylist in querylists: + relevance_score_list = [query.relevance_score for query in querylist] + if sum(relevance_score_list) != .0: + filter_query.append(querylist) + return filter_query + + +def load_from_text(filepath, shuffle=False, fill_missing=-1): + """ + parse data file into querys + """ + prev_query_id = -1 + querylists = [] + querylist = None + fn = __initialize_meta_info__() + with open(os.path.join(fn, filepath)) as f: + for line in f: + query = Query() + query = query._parse_(line) + if query == None: + continue + if query.query_id != prev_query_id: + if querylist is not None: + querylists.append(querylist) + querylist = QueryList() + prev_query_id = query.query_id + querylist._add_query(query) + if querylist is not None: + querylists.append(querylist) + return querylists + + +def __reader__(filepath, format="pairwise", shuffle=False, fill_missing=-1): + """ + Parameters + -------- + filename : string + fill_missing : fill the missing value. 
default in MQ2007 is -1 + + Returns + ------ + yield + label query_left, query_right # format = "pairwise" + label querylist # format = "listwise" + """ + querylists = query_filter( + load_from_text( + filepath, shuffle=shuffle, fill_missing=fill_missing)) + for querylist in querylists: + if format == "plain_txt": + yield next(gen_plain_txt(querylist)) + elif format == "pointwise": + yield next(gen_point(querylist)) + elif format == "pairwise": + for pair in gen_pair(querylist): + yield pair + elif format == "listwise": + yield next(gen_list(querylist)) + + +train = functools.partial(__reader__, filepath="MQ2007/MQ2007/Fold1/train.txt") +test = functools.partial(__reader__, filepath="MQ2007/MQ2007/Fold1/test.txt") + + +def fetch(): + return download(URL, "MQ2007", MD5) + + +if __name__ == "__main__": + fetch() + mytest = functools.partial( + __reader__, filepath="MQ2007/MQ2007/Fold1/sample", format="listwise") + for label, query in mytest(): + print label, query diff --git a/python/paddle/dataset/sentiment.py b/python/paddle/dataset/sentiment.py new file mode 100644 index 0000000000000000000000000000000000000000..f5461164fe6b816356e42fc7b7dcf388eccfadfb --- /dev/null +++ b/python/paddle/dataset/sentiment.py @@ -0,0 +1,140 @@ +# /usr/bin/env python +# -*- coding:utf-8 -*- + +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The script fetch and preprocess movie_reviews data set that provided by NLTK + +TODO(yuyang18): Complete dataset. +""" + +import collections +from itertools import chain + +import nltk +from nltk.corpus import movie_reviews + +import paddle.dataset.common + +__all__ = ['train', 'test', 'get_word_dict', 'convert'] +NUM_TRAINING_INSTANCES = 1600 +NUM_TOTAL_INSTANCES = 2000 + + +def download_data_if_not_yet(): + """ + Download the data set, if the data set is not download. + """ + try: + # make sure that nltk can find the data + if paddle.dataset.common.DATA_HOME not in nltk.data.path: + nltk.data.path.append(paddle.dataset.common.DATA_HOME) + movie_reviews.categories() + except LookupError: + print "Downloading movie_reviews data set, please wait....." + nltk.download( + 'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME) + print "Download data set success....." 
+ print "Path is " + nltk.data.find('corpora/movie_reviews').path + + +def get_word_dict(): + """ + Sorted the words by the frequency of words which occur in sample + :return: + words_freq_sorted + """ + words_freq_sorted = list() + word_freq_dict = collections.defaultdict(int) + download_data_if_not_yet() + + for category in movie_reviews.categories(): + for field in movie_reviews.fileids(category): + for words in movie_reviews.words(field): + word_freq_dict[words] += 1 + words_sort_list = word_freq_dict.items() + words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) + for index, word in enumerate(words_sort_list): + words_freq_sorted.append((word[0], index)) + return words_freq_sorted + + +def sort_files(): + """ + Sorted the sample for cross reading the sample + :return: + files_list + """ + files_list = list() + neg_file_list = movie_reviews.fileids('neg') + pos_file_list = movie_reviews.fileids('pos') + files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list))) + return files_list + + +def load_sentiment_data(): + """ + Load the data set + :return: + data_set + """ + data_set = list() + download_data_if_not_yet() + words_ids = dict(get_word_dict()) + for sample_file in sort_files(): + words_list = list() + category = 0 if 'neg' in sample_file else 1 + for word in movie_reviews.words(sample_file): + words_list.append(words_ids[word.lower()]) + data_set.append((words_list, category)) + return data_set + + +def reader_creator(data): + """ + Reader creator, generate an iterator for data set + :param data: + train data set or test data set + """ + for each in data: + yield each[0], each[1] + + +def train(): + """ + Default training set reader creator + """ + data_set = load_sentiment_data() + return reader_creator(data_set[0:NUM_TRAINING_INSTANCES]) + + +def test(): + """ + Default test set reader creator + """ + data_set = load_sentiment_data() + return reader_creator(data_set[NUM_TRAINING_INSTANCES:]) + + +def fetch(): + nltk.download('movie_reviews', download_dir=paddle.dataset.common.DATA_HOME) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, train, 1000, "sentiment_train") + paddle.dataset.common.convert(path, test, 1000, "sentiment_test") diff --git a/python/paddle/dataset/tests/CMakeLists.txt b/python/paddle/dataset/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..485c38a13b573664d8033c237272a10ebb7c9701 --- /dev/null +++ b/python/paddle/dataset/tests/CMakeLists.txt @@ -0,0 +1 @@ +py_test(test_image SRCS test_image.py) diff --git a/python/paddle/dataset/tests/cat.jpg b/python/paddle/dataset/tests/cat.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc1fbbd371216b9904b522ed302700c79d2e4876 Binary files /dev/null and b/python/paddle/dataset/tests/cat.jpg differ diff --git a/python/paddle/dataset/tests/cifar_test.py b/python/paddle/dataset/tests/cifar_test.py new file mode 100644 index 0000000000000000000000000000000000000000..839125b09dd5c6432e3572374a7345a77a43f7cf --- /dev/null +++ b/python/paddle/dataset/tests/cifar_test.py @@ -0,0 +1,56 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.dataset.cifar +import unittest + + +class TestCIFAR(unittest.TestCase): + def check_reader(self, reader): + sum = 0 + label = 0 + for l in reader(): + self.assertEqual(l[0].size, 3072) + if l[1] > label: + label = l[1] + sum += 1 + return sum, label + + def test_test10(self): + instances, max_label_value = self.check_reader( + paddle.dataset.cifar.test10()) + self.assertEqual(instances, 10000) + self.assertEqual(max_label_value, 9) + + def test_train10(self): + instances, max_label_value = self.check_reader( + paddle.dataset.cifar.train10()) + self.assertEqual(instances, 50000) + self.assertEqual(max_label_value, 9) + + def test_test100(self): + instances, max_label_value = self.check_reader( + paddle.dataset.cifar.test100()) + self.assertEqual(instances, 10000) + self.assertEqual(max_label_value, 99) + + def test_train100(self): + instances, max_label_value = self.check_reader( + paddle.dataset.cifar.train100()) + self.assertEqual(instances, 50000) + self.assertEqual(max_label_value, 99) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/common_test.py b/python/paddle/dataset/tests/common_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e7cc02aa83061599ffefa18de6cb02ac0fc9e9b7 --- /dev/null +++ b/python/paddle/dataset/tests/common_test.py @@ -0,0 +1,94 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
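+
+# The tests below exercise the helpers in paddle.dataset.common: md5file,
+# download, split, cluster_files_reader and the recordio convert utility.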
+
+import paddle.dataset.common
+import unittest
+import tempfile
+import glob
+import recordio
+
+
+class TestCommon(unittest.TestCase):
+    def test_md5file(self):
+        _, temp_path = tempfile.mkstemp()
+        with open(temp_path, 'w') as f:
+            f.write("Hello\n")
+        self.assertEqual('09f7e02f1290be211da707a266f153b3',
+                         paddle.dataset.common.md5file(temp_path))
+
+    def test_download(self):
+        yi_avatar = 'https://avatars0.githubusercontent.com/u/1548775?v=3&s=460'
+        self.assertEqual(
+            paddle.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
+            paddle.dataset.common.download(yi_avatar, 'test',
+                                           'f75287202d6622414c706c36c16f8e0d'))
+
+    def test_split(self):
+        def test_reader():
+            def reader():
+                for x in xrange(10):
+                    yield x
+
+            return reader
+
+        temp_path = tempfile.mkdtemp()
+        paddle.dataset.common.split(
+            test_reader(), 4, suffix=temp_path + '/test-%05d.pickle')
+        files = glob.glob(temp_path + '/test-*.pickle')
+        self.assertEqual(len(files), 3)
+
+    def test_cluster_file_reader(self):
+        temp_path = tempfile.mkdtemp()
+        for x in xrange(5):
+            with open(temp_path + '/%05d.test' % x, 'w') as f:
+                f.write('%d\n' % x)
+        reader = paddle.dataset.common.cluster_files_reader(
+            temp_path + '/*.test', 5, 0)
+        for idx, e in enumerate(reader()):
+            self.assertEqual(e, str("0"))
+
+    def test_convert(self):
+        record_num = 10
+        num_shards = 4
+
+        def test_reader():
+            def reader():
+                for x in xrange(record_num):
+                    yield x
+
+            return reader
+
+        path = tempfile.mkdtemp()
+        paddle.dataset.common.convert(path,
+                                      test_reader(), num_shards,
+                                      'random_images')
+
+        files = glob.glob(path + '/random_images-*')
+        self.assertEqual(len(files), num_shards)
+
+        recs = []
+        for i in range(0, num_shards):
+            n = "%s/random_images-%05d-of-%05d" % (path, i, num_shards - 1)
+            r = recordio.reader(n)
+            while True:
+                d = r.read()
+                if d is None:
+                    break
+                recs.append(d)
+
+        recs.sort()
+        self.assertEqual(len(recs), record_num)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/dataset/tests/flowers_test.py b/python/paddle/dataset/tests/flowers_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..06260fd796ce0271b7cec2f42a8a5a255a02dc24
--- /dev/null
+++ b/python/paddle/dataset/tests/flowers_test.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
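+
+# The flowers readers yield 6149 training, 1020 test and 1020 validation
+# samples with labels in [1, 102]; those counts are asserted below.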
+ +import paddle.dataset.flowers +import unittest + + +class TestFlowers(unittest.TestCase): + def check_reader(self, reader): + sum = 0 + label = 0 + size = 224 * 224 * 3 + for l in reader(): + self.assertEqual(l[0].size, size) + if l[1] > label: + label = l[1] + sum += 1 + return sum, label + + def test_train(self): + instances, max_label_value = self.check_reader( + paddle.dataset.flowers.train()) + self.assertEqual(instances, 6149) + self.assertEqual(max_label_value, 102) + + def test_test(self): + instances, max_label_value = self.check_reader( + paddle.dataset.flowers.test()) + self.assertEqual(instances, 1020) + self.assertEqual(max_label_value, 102) + + def test_valid(self): + instances, max_label_value = self.check_reader( + paddle.dataset.flowers.valid()) + self.assertEqual(instances, 1020) + self.assertEqual(max_label_value, 102) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/imdb_test.py b/python/paddle/dataset/tests/imdb_test.py new file mode 100644 index 0000000000000000000000000000000000000000..539da049449cd273db0a9e260851ed40e1be0f04 --- /dev/null +++ b/python/paddle/dataset/tests/imdb_test.py @@ -0,0 +1,55 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.dataset.imdb +import unittest +import re + +TRAIN_POS_PATTERN = re.compile("aclImdb/train/pos/.*\.txt$") +TRAIN_NEG_PATTERN = re.compile("aclImdb/train/neg/.*\.txt$") +TRAIN_PATTERN = re.compile("aclImdb/train/.*\.txt$") + +TEST_POS_PATTERN = re.compile("aclImdb/test/pos/.*\.txt$") +TEST_NEG_PATTERN = re.compile("aclImdb/test/neg/.*\.txt$") +TEST_PATTERN = re.compile("aclImdb/test/.*\.txt$") + + +class TestIMDB(unittest.TestCase): + word_idx = None + + def test_build_dict(self): + if self.word_idx == None: + self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150) + + self.assertEqual(len(self.word_idx), 7036) + + def check_dataset(self, dataset, expected_size): + if self.word_idx == None: + self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150) + + sum = 0 + for l in dataset(self.word_idx): + self.assertEqual(l[1], sum % 2) + sum += 1 + self.assertEqual(sum, expected_size) + + def test_train(self): + self.check_dataset(paddle.dataset.imdb.train, 25000) + + def test_test(self): + self.check_dataset(paddle.dataset.imdb.test, 25000) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/imikolov_test.py b/python/paddle/dataset/tests/imikolov_test.py new file mode 100644 index 0000000000000000000000000000000000000000..233fd9fc8cea4cd0b5cd052580030fc8c993693c --- /dev/null +++ b/python/paddle/dataset/tests/imikolov_test.py @@ -0,0 +1,67 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.dataset.imikolov +import unittest + +WORD_DICT = paddle.dataset.imikolov.build_dict() + + +class TestMikolov(unittest.TestCase): + def check_reader(self, reader, n): + for l in reader(): + self.assertEqual(len(l), n) + + def test_train(self): + n = 5 + self.check_reader(paddle.dataset.imikolov.train(WORD_DICT, n), n) + + first_line = 'aer banknote berlitz calloway centrust cluett fromstein '\ + 'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts '\ + 'rake regatta rubens sim snack-food ssangyong swapo wachter' + first_line = [ + WORD_DICT.get(ch, WORD_DICT['']) + for ch in first_line.split(' ') + ] + for l in paddle.dataset.imikolov.train( + WORD_DICT, n=-1, + data_type=paddle.dataset.imikolov.DataType.SEQ)(): + read_line = l[0][1:] + break + self.assertEqual(first_line, read_line) + + def test_test(self): + n = 5 + self.check_reader(paddle.dataset.imikolov.test(WORD_DICT, n), n) + + first_line = 'consumers may want to move their telephones a little '\ + 'closer to the tv set' + first_line = [ + WORD_DICT.get(ch, WORD_DICT['']) + for ch in first_line.split(' ') + ] + for l in paddle.dataset.imikolov.test( + WORD_DICT, n=-1, + data_type=paddle.dataset.imikolov.DataType.SEQ)(): + read_line = l[0][1:] + break + self.assertEqual(first_line, read_line) + + def test_total(self): + _, idx = zip(*WORD_DICT.items()) + self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/mnist_test.py b/python/paddle/dataset/tests/mnist_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8ada19d3f2ee13e194d08e19a4b86b558c69a0a7 --- /dev/null +++ b/python/paddle/dataset/tests/mnist_test.py @@ -0,0 +1,44 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle.dataset.mnist +import unittest + + +class TestMNIST(unittest.TestCase): + def check_reader(self, reader): + sum = 0 + label = 0 + for l in reader(): + self.assertEqual(l[0].size, 784) + if l[1] > label: + label = l[1] + sum += 1 + return sum, label + + def test_train(self): + instances, max_label_value = self.check_reader( + paddle.dataset.mnist.train()) + self.assertEqual(instances, 60000) + self.assertEqual(max_label_value, 9) + + def test_test(self): + instances, max_label_value = self.check_reader( + paddle.dataset.mnist.test()) + self.assertEqual(instances, 10000) + self.assertEqual(max_label_value, 9) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/mq2007_test.py b/python/paddle/dataset/tests/mq2007_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fba388724a8e84591df7150b41f8ea39a850fc31 --- /dev/null +++ b/python/paddle/dataset/tests/mq2007_test.py @@ -0,0 +1,33 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.dataset.mq2007 +import unittest + + +class TestMQ2007(unittest.TestCase): + def test_pairwise(self): + for label, query_left, query_right in paddle.dataset.mq2007.test( + format="pairwise"): + self.assertEqual(query_left.shape(), (46, )) + self.assertEqual(query_right.shape(), (46, )) + + def test_listwise(self): + for label_array, query_array in paddle.dataset.mq2007.test( + format="listwise"): + self.assertEqual(len(label_array), len(query_array)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/dataset/tests/test_image.py b/python/paddle/dataset/tests/test_image.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd56607ae1998935a3b3aaa0e3279515c2a540c --- /dev/null +++ b/python/paddle/dataset/tests/test_image.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
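+
+# This test expects cat.jpg to sit next to this file (it is added in this
+# change) and exercises load_image, resize_short, left_right_flip and to_chw.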
+ +import unittest +import numpy as np + +import paddle.dataset.image as image + + +class Image(unittest.TestCase): + def test_resize_flip_chw(self): + # resize + im = image.load_image('cat.jpg') + im = image.resize_short(im, 256) + self.assertEqual(256, min(im.shape[:2])) + self.assertEqual(3, im.shape[2]) + + # flip + im = image.left_right_flip(im) + im2 = np.flip(im, 1) + self.assertEqual(im.all(), im2.all()) + + # to_chw + h, w, c = im.shape + im = image.to_chw(im) + self.assertEqual(c, im.shape[0]) + self.assertEqual(h, im.shape[1]) + self.assertEqual(w, im.shape[2]) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/test_sentiment.py b/python/paddle/dataset/tests/test_sentiment.py new file mode 100644 index 0000000000000000000000000000000000000000..543f4b7378b583ea3857bf785cf330c43e535c2a --- /dev/null +++ b/python/paddle/dataset/tests/test_sentiment.py @@ -0,0 +1,55 @@ +# /usr/bin/env python +# -*- coding:utf-8 -*- + +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import nltk +import paddle.dataset.sentiment as st +from nltk.corpus import movie_reviews + + +class TestSentimentMethods(unittest.TestCase): + def test_get_word_dict(self): + word_dict = st.get_word_dict()[0:10] + test_word_list = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3), + (u'and', 4), (u'of', 5), (u'to', 6), (u"'", 7), + (u'is', 8), (u'in', 9)] + for idx, each in enumerate(word_dict): + self.assertEqual(each, test_word_list[idx]) + self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path) + + def test_sort_files(self): + last_label = '' + for sample_file in st.sort_files(): + current_label = sample_file.split("/")[0] + self.assertNotEqual(current_label, last_label) + last_label = current_label + + def test_data_set(self): + data_set = st.load_sentiment_data() + last_label = -1 + for each in st.test(): + self.assertNotEqual(each[1], last_label) + last_label = each[1] + self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES) + self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES) + self.assertEqual( + len(list(st.test())), + (st.NUM_TOTAL_INSTANCES - st.NUM_TRAINING_INSTANCES)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/dataset/tests/voc2012_test.py b/python/paddle/dataset/tests/voc2012_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0d285461a8ae8a9cc69fbec0dcf5efc106b594f0 --- /dev/null +++ b/python/paddle/dataset/tests/voc2012_test.py @@ -0,0 +1,42 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.dataset.voc2012
+import unittest
+
+
+class TestVOC(unittest.TestCase):
+    def check_reader(self, reader):
+        sum = 0
+        for l in reader():
+            self.assertEqual(l[0].size, 3 * l[1].size)
+            sum += 1
+        return sum
+
+    def test_train(self):
+        count = self.check_reader(paddle.dataset.voc2012.train())
+        self.assertEqual(count, 2913)
+
+    def test_test(self):
+        count = self.check_reader(paddle.dataset.voc2012.test())
+        self.assertEqual(count, 1464)
+
+    def test_val(self):
+        count = self.check_reader(paddle.dataset.voc2012.val())
+        self.assertEqual(count, 1449)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/dataset/tests/wmt16_test.py b/python/paddle/dataset/tests/wmt16_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b949d8bf5212d51016a33da322095bde2038200
--- /dev/null
+++ b/python/paddle/dataset/tests/wmt16_test.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.dataset.wmt16
+import unittest
+
+
+class TestWMT16(unittest.TestCase):
+    def checkout_one_sample(self, sample):
+        # train data has 3 fields: source language word indices,
+        # target language word indices, and target next word indices.
+        self.assertEqual(len(sample), 3)
+
+        # test start mark and end mark in source word indices.
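+        # (By the wmt16 convention checked in test_get_dict below, index 0
+        # is the start mark and index 1 is the end mark of a sequence.)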
+ self.assertEqual(sample[0][0], 0) + self.assertEqual(sample[0][-1], 1) + + # test start mask in target word indices + self.assertEqual(sample[1][0], 0) + + # test en mask in target next word indices + self.assertEqual(sample[2][-1], 1) + + def test_train(self): + for idx, sample in enumerate( + paddle.dataset.wmt16.train( + src_dict_size=100000, trg_dict_size=100000)()): + if idx >= 10: break + self.checkout_one_sample(sample) + + def test_test(self): + for idx, sample in enumerate( + paddle.dataset.wmt16.test( + src_dict_size=1000, trg_dict_size=1000)()): + if idx >= 10: break + self.checkout_one_sample(sample) + + def test_val(self): + for idx, sample in enumerate( + paddle.dataset.wmt16.validation( + src_dict_size=1000, trg_dict_size=1000)()): + if idx >= 10: break + self.checkout_one_sample(sample) + + def test_get_dict(self): + dict_size = 1000 + word_dict = paddle.dataset.wmt16.get_dict("en", dict_size, True) + self.assertEqual(len(word_dict), dict_size) + self.assertEqual(word_dict[0], "") + self.assertEqual(word_dict[1], "") + self.assertEqual(word_dict[2], "") + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py new file mode 100644 index 0000000000000000000000000000000000000000..6a56e9d5563c76ab6f524ccea9191693dc227010 --- /dev/null +++ b/python/paddle/dataset/uci_housing.py @@ -0,0 +1,125 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +UCI Housing dataset. + +This module will download dataset from +https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and +parse training set and test set into paddle reader creators. 
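+
+Example usage (a minimal sketch):
+
+.. code-block:: python
+
+    import paddle.dataset.uci_housing as uci
+
+    for features, price in uci.train()():
+        pass  # 13 normalized features and the 1-element price target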
+""" + +import numpy as np +import os +import paddle.dataset.common + +__all__ = ['train', 'test'] + +URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' +MD5 = 'd4accdce7a25600298819f8e28e8d593' +feature_names = [ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', + 'PTRATIO', 'B', 'LSTAT', 'convert' +] + +UCI_TRAIN_DATA = None +UCI_TEST_DATA = None +URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar' +MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b' + + +def feature_range(maximums, minimums): + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt + fig, ax = plt.subplots() + feature_num = len(maximums) + ax.bar(range(feature_num), maximums - minimums, color='r', align='center') + ax.set_title('feature scale') + plt.xticks(range(feature_num), feature_names) + plt.xlim([-1, feature_num]) + fig.set_figheight(6) + fig.set_figwidth(10) + if not os.path.exists('./image'): + os.makedirs('./image') + fig.savefig('image/ranges.png', dpi=48) + plt.close(fig) + + +def load_data(filename, feature_num=14, ratio=0.8): + global UCI_TRAIN_DATA, UCI_TEST_DATA + if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None: + return + + data = np.fromfile(filename, sep=' ') + data = data.reshape(data.shape[0] / feature_num, feature_num) + maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum( + axis=0) / data.shape[0] + feature_range(maximums[:-1], minimums[:-1]) + for i in xrange(feature_num - 1): + data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) + offset = int(data.shape[0] * ratio) + UCI_TRAIN_DATA = data[:offset] + UCI_TEST_DATA = data[offset:] + + +def train(): + """ + UCI_HOUSING training set creator. + + It returns a reader creator, each sample in the reader is features after + normalization and price number. + + :return: Training reader creator + :rtype: callable + """ + global UCI_TRAIN_DATA + load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) + + def reader(): + for d in UCI_TRAIN_DATA: + yield d[:-1], d[-1:] + + return reader + + +def test(): + """ + UCI_HOUSING test set creator. + + It returns a reader creator, each sample in the reader is features after + normalization and price number. + + :return: Test reader creator + :rtype: callable + """ + global UCI_TEST_DATA + load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) + + def reader(): + for d in UCI_TEST_DATA: + yield d[:-1], d[-1:] + + return reader + + +def fetch(): + paddle.dataset.common.download(URL, 'uci_housing', MD5) + + +def convert(path): + """ + Converts dataset to recordio format + """ + paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train") + paddle.dataset.common.convert(path, test(), 1000, "uci_houseing_test") diff --git a/python/paddle/dataset/voc2012.py b/python/paddle/dataset/voc2012.py new file mode 100644 index 0000000000000000000000000000000000000000..9c945574dbcc15f5cee370206ed7e70ba8ab5014 --- /dev/null +++ b/python/paddle/dataset/voc2012.py @@ -0,0 +1,85 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image dataset for segmentation. +The 2012 dataset contains images from 2008-2011 for which additional +segmentations have been prepared. As in previous years the assignment +to training/test sets has been maintained. The total number of images +with segmentation has been increased from 7,062 to 9,993. +""" + +import tarfile +import io +import numpy as np +from paddle.dataset.common import download +from paddle.dataset.image import * +from PIL import Image + +__all__ = ['train', 'test', 'val'] + +VOC_URL = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/\ +VOCtrainval_11-May-2012.tar' + +VOC_MD5 = '6cd6e144f989b92b3379bac3b3de84fd' +SET_FILE = 'VOCdevkit/VOC2012/ImageSets/Segmentation/{}.txt' +DATA_FILE = 'VOCdevkit/VOC2012/JPEGImages/{}.jpg' +LABEL_FILE = 'VOCdevkit/VOC2012/SegmentationClass/{}.png' + +CACHE_DIR = 'voc2012' + + +def reader_creator(filename, sub_name): + + tarobject = tarfile.open(filename) + name2mem = {} + for ele in tarobject.getmembers(): + name2mem[ele.name] = ele + + def reader(): + set_file = SET_FILE.format(sub_name) + sets = tarobject.extractfile(name2mem[set_file]) + for line in sets: + line = line.strip() + data_file = DATA_FILE.format(line) + label_file = LABEL_FILE.format(line) + data = tarobject.extractfile(name2mem[data_file]).read() + label = tarobject.extractfile(name2mem[label_file]).read() + data = Image.open(io.BytesIO(data)) + label = Image.open(io.BytesIO(label)) + data = np.array(data) + label = np.array(label) + yield data, label + + return reader + + +def train(): + """ + Create a train dataset reader containing 2913 images in HWC order. + """ + return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'trainval') + + +def test(): + """ + Create a test dataset reader containing 1464 images in HWC order. + """ + return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'train') + + +def val(): + """ + Create a val dataset reader containing 1449 images in HWC order. + """ + return reader_creator(download(VOC_URL, CACHE_DIR, VOC_MD5), 'val') diff --git a/python/paddle/dataset/wmt14.py b/python/paddle/dataset/wmt14.py new file mode 100644 index 0000000000000000000000000000000000000000..f0908c737874fa7335cca5b5f0cba83190c9f90f --- /dev/null +++ b/python/paddle/dataset/wmt14.py @@ -0,0 +1,173 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +WMT14 dataset. +The original WMT14 dataset is too large and a small set of data for set is +provided. 
This module will download dataset from +http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and +parse training set and test set into paddle reader creators. + +""" +import tarfile +import gzip + +import paddle.dataset.common + +__all__ = [ + 'train', + 'test', + 'get_dict', + 'convert', +] + +URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' + 'cslm_joint_paper/data/dev+test.tgz') +MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' +# this is a small set of data for test. The original data is too large and +# will be add later. +URL_TRAIN = ('http://paddlepaddle.cdn.bcebos.com/demo/' + 'wmt_shrinked_data/wmt14.tgz') +MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' +# BLEU of this trained model is 26.92 +URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz' +MD5_MODEL = '0cb4a5366189b6acba876491c8724fa3' + +START = "" +END = "" +UNK = "" +UNK_IDX = 2 + + +def __read_to_dict(tar_file, dict_size): + def __to_dict(fd, size): + out_dict = dict() + for line_count, line in enumerate(fd): + if line_count < size: + out_dict[line.strip()] = line_count + else: + break + return out_dict + + with tarfile.open(tar_file, mode='r') as f: + names = [ + each_item.name for each_item in f + if each_item.name.endswith("src.dict") + ] + assert len(names) == 1 + src_dict = __to_dict(f.extractfile(names[0]), dict_size) + names = [ + each_item.name for each_item in f + if each_item.name.endswith("trg.dict") + ] + assert len(names) == 1 + trg_dict = __to_dict(f.extractfile(names[0]), dict_size) + return src_dict, trg_dict + + +def reader_creator(tar_file, file_name, dict_size): + def reader(): + src_dict, trg_dict = __read_to_dict(tar_file, dict_size) + with tarfile.open(tar_file, mode='r') as f: + names = [ + each_item.name for each_item in f + if each_item.name.endswith(file_name) + ] + for name in names: + for line in f.extractfile(name): + line_split = line.strip().split('\t') + if len(line_split) != 2: + continue + src_seq = line_split[0] # one source sequence + src_words = src_seq.split() + src_ids = [ + src_dict.get(w, UNK_IDX) + for w in [START] + src_words + [END] + ] + + trg_seq = line_split[1] # one target sequence + trg_words = trg_seq.split() + trg_ids = [trg_dict.get(w, UNK_IDX) for w in trg_words] + + # remove sequence whose length > 80 in training mode + if len(src_ids) > 80 or len(trg_ids) > 80: + continue + trg_ids_next = trg_ids + [trg_dict[END]] + trg_ids = [trg_dict[START]] + trg_ids + + yield src_ids, trg_ids, trg_ids_next + + return reader + + +def train(dict_size): + """ + WMT14 training set creator. + + It returns a reader creator, each sample in the reader is source language + word ID sequence, target language word ID sequence and next word ID + sequence. + + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), + 'train/train', dict_size) + + +def test(dict_size): + """ + WMT14 test set creator. + + It returns a reader creator, each sample in the reader is source language + word ID sequence, target language word ID sequence and next word ID + sequence. 
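+
+    For example (an illustrative sketch, not part of the original
+    docstring; the dictionary size below is arbitrary):
+
+        for src_ids, trg_ids, trg_ids_next in paddle.dataset.wmt14.test(
+                dict_size=30000)():
+            break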
+ + :return: Test reader creator + :rtype: callable + """ + return reader_creator( + paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), + 'test/test', dict_size) + + +def gen(dict_size): + return reader_creator( + paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), + 'gen/gen', dict_size) + + +def get_dict(dict_size, reverse=True): + # if reverse = False, return dict = {'a':'001', 'b':'002', ...} + # else reverse = true, return dict = {'001':'a', '002':'b', ...} + tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) + src_dict, trg_dict = __read_to_dict(tar_file, dict_size) + if reverse: + src_dict = {v: k for k, v in src_dict.items()} + trg_dict = {v: k for k, v in trg_dict.items()} + return src_dict, trg_dict + + +def fetch(): + paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) + paddle.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL) + + +def convert(path): + """ + Converts dataset to recordio format + """ + dict_size = 30000 + paddle.dataset.common.convert(path, train(dict_size), 1000, "wmt14_train") + paddle.dataset.common.convert(path, test(dict_size), 1000, "wmt14_test") diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py new file mode 100644 index 0000000000000000000000000000000000000000..ad23338a96df6856c7e15cb5e3bb713021a55bf0 --- /dev/null +++ b/python/paddle/dataset/wmt16.py @@ -0,0 +1,349 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +ACL2016 Multimodal Machine Translation. Please see this website for more +details: http://www.statmt.org/wmt16/multimodal-task.html#task1 + +If you use the dataset created for your task, please cite the following paper: +Multi30K: Multilingual English-German Image Descriptions. + +@article{elliott-EtAl:2016:VL16, + author = {{Elliott}, D. and {Frank}, S. and {Sima"an}, K. 
and {Specia}, L.}, + title = {Multi30K: Multilingual English-German Image Descriptions}, + booktitle = {Proceedings of the 6th Workshop on Vision and Language}, + year = {2016}, + pages = {70--74}, + year = 2016 +} +""" + +import os +import tarfile +import gzip +from collections import defaultdict + +import paddle.dataset.common + +__all__ = [ + "train", + "test", + "validation", + "convert", + "fetch", + "get_dict", +] + +DATA_URL = ("http://cloud.dlnel.org/filepub/" + "?uuid=46a0808e-ddd8-427c-bacd-0dbc6d045fed") +DATA_MD5 = "0c38be43600334966403524a40dcd81e" + +TOTAL_EN_WORDS = 11250 +TOTAL_DE_WORDS = 19220 + +START_MARK = "" +END_MARK = "" +UNK_MARK = "" + + +def __build_dict(tar_file, dict_size, save_path, lang): + word_dict = defaultdict(int) + with tarfile.open(tar_file, mode="r") as f: + for line in f.extractfile("wmt16/train"): + line_split = line.strip().split("\t") + if len(line_split) != 2: continue + sen = line_split[0] if lang == "en" else line_split[1] + for w in sen.split(): + word_dict[w] += 1 + + with open(save_path, "w") as fout: + fout.write("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)) + for idx, word in enumerate( + sorted( + word_dict.iteritems(), key=lambda x: x[1], reverse=True)): + if idx + 3 == dict_size: break + fout.write("%s\n" % (word[0])) + + +def __load_dict(tar_file, dict_size, lang, reverse=False): + dict_path = os.path.join(paddle.dataset.common.DATA_HOME, + "wmt16/%s_%d.dict" % (lang, dict_size)) + if not os.path.exists(dict_path) or ( + len(open(dict_path, "r").readlines()) != dict_size): + __build_dict(tar_file, dict_size, dict_path, lang) + + word_dict = {} + with open(dict_path, "r") as fdict: + for idx, line in enumerate(fdict): + if reverse: + word_dict[idx] = line.strip() + else: + word_dict[line.strip()] = idx + return word_dict + + +def __get_dict_size(src_dict_size, trg_dict_size, src_lang): + src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else + TOTAL_DE_WORDS)) + trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else + TOTAL_ENG_WORDS)) + return src_dict_size, trg_dict_size + + +def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang): + def reader(): + src_dict = __load_dict(tar_file, src_dict_size, src_lang) + trg_dict = __load_dict(tar_file, trg_dict_size, + ("de" if src_lang == "en" else "en")) + + # the indice for start mark, end mark, and unk are the same in source + # language and target language. Here uses the source language + # dictionary to determine their indices. + start_id = src_dict[START_MARK] + end_id = src_dict[END_MARK] + unk_id = src_dict[UNK_MARK] + + src_col = 0 if src_lang == "en" else 1 + trg_col = 1 - src_col + + with tarfile.open(tar_file, mode="r") as f: + for line in f.extractfile(file_name): + line_split = line.strip().split("\t") + if len(line_split) != 2: + continue + src_words = line_split[src_col].split() + src_ids = [start_id] + [ + src_dict.get(w, unk_id) for w in src_words + ] + [end_id] + + trg_words = line_split[trg_col].split() + trg_ids = [trg_dict.get(w, unk_id) for w in trg_words] + + trg_ids_next = trg_ids + [end_id] + trg_ids = [start_id] + trg_ids + + yield src_ids, trg_ids, trg_ids_next + + return reader + + +def train(src_dict_size, trg_dict_size, src_lang="en"): + """ + WMT16 train set reader. + + This function returns the reader for train data. Each sample the reader + returns is made up of three fields: the source language word index sequence, + target language word index sequence and next word index sequence. 
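+
+    A short usage sketch (illustrative only; the dictionary sizes are
+    arbitrary and the data is downloaded on first use):
+
+        import paddle.dataset.wmt16 as wmt16
+
+        # each sample is (src_ids, trg_ids, trg_ids_next)
+        for src_ids, trg_ids, trg_ids_next in wmt16.train(
+                src_dict_size=1000, trg_dict_size=1000)():
+            break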
+ + + NOTE: + The original like for training data is: + http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz + + paddle.dataset.wmt16 provides a tokenized version of the original dataset by + using moses's tokenization script: + https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl + + Args: + src_dict_size(int): Size of the source language dictionary. Three + special tokens will be added into the dictionary: + for start mark, for end mark, and for + unknown word. + trg_dict_size(int): Size of the target language dictionary. Three + special tokens will be added into the dictionary: + for start mark, for end mark, and for + unknown word. + src_lang(string): A string indicating which language is the source + language. Available options are: "en" for English + and "de" for Germany. + + Returns: + callable: The train reader. + """ + + if src_lang not in ["en", "de"]: + raise ValueError("An error language type. Only support: " + "en (for English); de(for Germany).") + src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, + src_lang) + + return reader_creator( + tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, + "wmt16.tar.gz"), + file_name="wmt16/train", + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang) + + +def test(src_dict_size, trg_dict_size, src_lang="en"): + """ + WMT16 test set reader. + + This function returns the reader for test data. Each sample the reader + returns is made up of three fields: the source language word index sequence, + target language word index sequence and next word index sequence. + + NOTE: + The original like for test data is: + http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz + + paddle.dataset.wmt16 provides a tokenized version of the original dataset by + using moses's tokenization script: + https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl + + Args: + src_dict_size(int): Size of the source language dictionary. Three + special tokens will be added into the dictionary: + for start mark, for end mark, and for + unknown word. + trg_dict_size(int): Size of the target language dictionary. Three + special tokens will be added into the dictionary: + for start mark, for end mark, and for + unknown word. + src_lang(string): A string indicating which language is the source + language. Available options are: "en" for English + and "de" for Germany. + + Returns: + callable: The test reader. + """ + + if src_lang not in ["en", "de"]: + raise ValueError("An error language type. " + "Only support: en (for English); de(for Germany).") + + src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, + src_lang) + + return reader_creator( + tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, + "wmt16.tar.gz"), + file_name="wmt16/test", + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang) + + +def validation(src_dict_size, trg_dict_size, src_lang="en"): + """ + WMT16 validation set reader. + + This function returns the reader for validation data. Each sample the reader + returns is made up of three fields: the source language word index sequence, + target language word index sequence and next word index sequence. 
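+
+    For example, German-to-English samples can be read like this
+    (an illustrative sketch, not part of the original docstring):
+
+        for src_ids, trg_ids, trg_ids_next in paddle.dataset.wmt16.validation(
+                src_dict_size=1000, trg_dict_size=1000, src_lang="de")():
+            break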
+
+    NOTE:
+        The original link for the validation data is:
+        http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz
+
+        paddle.dataset.wmt16 provides a tokenized version of the original
+        dataset by using moses's tokenization script:
+        https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
+
+    Args:
+        src_dict_size(int): Size of the source language dictionary. Three
+                            special tokens will be added into the dictionary:
+                            <s> for start mark, <e> for end mark, and <unk>
+                            for unknown word.
+        trg_dict_size(int): Size of the target language dictionary. Three
+                            special tokens will be added into the dictionary:
+                            <s> for start mark, <e> for end mark, and <unk>
+                            for unknown word.
+        src_lang(string): A string indicating which language is the source
+                          language. Available options are: "en" for English
+                          and "de" for German.
+
+    Returns:
+        callable: The validation reader.
+    """
+    if src_lang not in ["en", "de"]:
+        raise ValueError("Invalid source language type. "
+                         "Only support: en (for English); de (for German).")
+    src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
+                                                   src_lang)
+
+    return reader_creator(
+        tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
+                                                "wmt16.tar.gz"),
+        file_name="wmt16/val",
+        src_dict_size=src_dict_size,
+        trg_dict_size=trg_dict_size,
+        src_lang=src_lang)
+
+
+def get_dict(lang, dict_size, reverse=False):
+    """
+    Return the word dictionary for the specified language.
+
+    Args:
+        lang(string): A string indicating which language the dictionary is
+                      for. Available options are: "en" for English and "de"
+                      for German.
+        dict_size(int): Size of the specified language dictionary.
+        reverse(bool): If reverse is set to False, the returned python
+                       dictionary will use word as key and use index as value.
+                       If reverse is set to True, the returned python
+                       dictionary will use index as key and word as value.
+
+    Returns:
+        dict: The word dictionary for the specific language.
+    """
+
+    if lang == "en":
+        dict_size = min(dict_size, TOTAL_EN_WORDS)
+    else:
+        dict_size = min(dict_size, TOTAL_DE_WORDS)
+
+    dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
+                             "wmt16/%s_%d.dict" % (lang, dict_size))
+    assert os.path.exists(dict_path), (
+        "Word dictionary does not exist. "
+        "Please invoke paddle.dataset.wmt16.train/test/validation first "
+        "to build the dictionary.")
+    tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz")
+    return __load_dict(tar_file, dict_size, lang, reverse)
+
+
+def fetch():
+    """Download the entire dataset.
+    """
+    paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
+                                   "wmt16.tar.gz")
+
+
+def convert(path, src_dict_size, trg_dict_size, src_lang):
+    """Converts dataset to recordio format.
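+
+    For example (illustrative only; the output path below is hypothetical):
+
+        convert("/tmp/wmt16_recordio", src_dict_size=1000,
+                trg_dict_size=1000, src_lang="en")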
+ """ + + paddle.dataset.common.convert( + path, + train( + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang), + 1000, + "wmt16_train") + paddle.dataset.common.convert( + path, + test( + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang), + 1000, + "wmt16_test") + paddle.dataset.common.convert( + path, + validation( + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang), + 1000, + "wmt16_validation") diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index fcea28220485039c9daf3c5fa2688c31f9f34c42..f757411b853bacb9e03fc42fa2ef6593c3cde00f 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -29,9 +29,10 @@ import optimizer import backward import regularizer import average +import metrics from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder -from core import LoDTensor, CPUPlace, CUDAPlace +from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace from distribute_transpiler import DistributeTranspiler from distribute_transpiler_simple import SimpleDistributeTranspiler from concurrency import (Go, make_channel, channel_send, channel_recv, @@ -41,6 +42,7 @@ from memory_optimization_transpiler import memory_optimize, release_memory import profiler import unique_name import recordio_writer +from parallel_executor import ParallelExecutor Tensor = LoDTensor @@ -56,6 +58,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ 'LoDTensor', 'CPUPlace', 'CUDAPlace', + 'CUDAPinnedPlace', 'Tensor', 'ParamAttr', 'WeightNormParamAttr', @@ -68,6 +71,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ 'profiler', 'unique_name', 'recordio_writer', + 'ParallelExecutor', ] @@ -82,6 +86,8 @@ def __bootstrap__(): import core import os + in_test = 'unittest' in sys.modules + try: num_threads = int(os.getenv('OMP_NUM_THREADS', '1')) except ValueError: @@ -106,8 +112,11 @@ def __bootstrap__(): core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) - core.init_devices() + # don't init_p2p when in unittest to save time. + core.init_devices(not in_test) +# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py. +# Consider paddle.init(args) or paddle.main(args) layers.monkey_patch_variable() __bootstrap__() diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py index ded6eb085968343fcdc9f6e4b8353c08408df426..6abe8233b07c484494848c566e9898600a7d8f5c 100644 --- a/python/paddle/fluid/average.py +++ b/python/paddle/fluid/average.py @@ -13,6 +13,7 @@ # limitations under the License. import numpy as np +import warnings """ Class of all kinds of Average. @@ -22,6 +23,8 @@ import numpy as np wrappers of Python functions. """ +__all__ = ["WeightedAverage"] + def _is_number_(var): return isinstance(var, int) or isinstance(var, float) or (isinstance( @@ -34,6 +37,9 @@ def _is_number_or_matrix_(var): class WeightedAverage(object): def __init__(self): + warnings.warn( + "The %s is deprecated, please use fluid.metrics.Accuracy instead." 
% + (self.__class__.__name__), Warning) self.reset() def reset(self): diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debuger.py index 7b4afa9bf65e1369329cd4648c1f5c4bd8fa8357..1c56064a1e8bdc5d975837cb5a75a40d557765ad 100644 --- a/python/paddle/fluid/debuger.py +++ b/python/paddle/fluid/debuger.py @@ -16,6 +16,7 @@ import sys import re from graphviz import GraphPreviewGenerator import proto.framework_pb2 as framework_pb2 +from google.protobuf import text_format _vartype2str_ = [ "UNK", @@ -100,7 +101,7 @@ def repr_var(vardesc): def pprint_program_codes(program_desc): reprs = [] - for block_idx in range(program_desc.num_blocks()): + for block_idx in range(program_desc.desc.num_blocks()): block_desc = program_desc.block(block_idx) block_repr = pprint_block_codes(block_desc) reprs.append(block_repr) @@ -127,7 +128,7 @@ def pprint_block_codes(block_desc, show_backward=False): if type(block_desc) is not framework_pb2.BlockDesc: block_desc = framework_pb2.BlockDesc.FromString( - block_desc.serialize_to_string()) + block_desc.desc.serialize_to_string()) var_reprs = [] op_reprs = [] for var in block_desc.vars: @@ -237,13 +238,13 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): # draw parameters and args vars = {} for var in desc.vars: - shape = [str(i) for i in var.lod_tensor.tensor.dims] - if not shape: - shape = ['null'] + # TODO(gongwb): format the var.type # create var if var.persistable: varn = graph.add_param( - var.name, var.type, shape, highlight=need_highlight(var.name)) + var.name, + str(var.type).replace("\n", "
", 1), + highlight=need_highlight(var.name)) else: varn = graph.add_arg(var.name, highlight=need_highlight(var.name)) vars[var.name] = varn @@ -268,4 +269,4 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): for var in op.outputs: add_op_link_var(opn, var, True) - graph(path, show=True) + graph(path, show=False) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 62147d325b699a62bd39cfbaca44874b7fc19a0f..aa15392d7e4901e8ee23ad5b4370542232adc2a5 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -13,14 +13,17 @@ # limitations under the License. from __future__ import print_function -import framework -from framework import Program, default_main_program, default_startup_program, Parameter, Variable -import optimizer -from layer_helper import LayerHelper -from distributed_spliter import * + import math + +import distributed_splitter as splitter +import framework +from framework import Program, default_main_program, Variable from . import core -import debuger + +LOOKUP_TABLE_TYPE = "lookup_table" +LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" +RPC_CLIENT_VAR_NAME = "RPC_CLIENT_VAR" class VarBlock: @@ -35,9 +38,9 @@ class VarBlock: class UnionFind(object): - """ Union-find data struct. - - Union-find is a data struct that keeps track of a set of elements partitioned + """ Union-find data structure. + + Union-find is a data structure that keeps track of a set of elements partitioned into a number of disjoint (non-overlapping) subsets. Reference: @@ -102,6 +105,8 @@ def split_dense_variable(var_list, the parameter server side can gain better performance. By default minimum block size is 1024. The max block size is used to prevent very large blocks that may cause send error. + :return: A list of VarBlocks. Each VarBlock specifies a shard of + the var. """ blocks = [] for var in var_list: @@ -138,7 +143,7 @@ class DistributeTranspiler: program=None, pservers="127.0.0.1:6174", trainers=1, - split_method=round_robin): + split_method=splitter.round_robin): """ Transpile the program to distributed data-parallelism programs. The main_program will be transformed to use a remote parameter server @@ -183,31 +188,80 @@ class DistributeTranspiler: assert (callable(split_method)) if program is None: program = default_main_program() - self.program = program - self.trainers = trainers + self.origin_program = program + self.trainer_num = trainers self.optimize_ops = optimize_ops # TODO(typhoonzero): currently trainer_id is fetched from cluster system # like Kubernetes, we should port this to use etcd later when developing # fluid distributed training with fault-tolerance. self.trainer_id = trainer_id pserver_endpoints = pservers.split(",") + self.pserver_endpoints = pserver_endpoints + + # process lookup_table_op + # 1. check all lookup_table_op is distributed + # 2. check all lookup_table_op share the same table. 
+ distributed_lookup_table_ops = [] + # support only one distributed_lookup_table now + self.table_name = None + for op in program.global_block().ops: + if op.type == LOOKUP_TABLE_TYPE: + if op.attrs['is_distributed'] is True: + if self.table_name is None: + self.table_name = op.input("W")[0] + if self.table_name != op.input("W")[0]: + raise RuntimeError("all distributed lookup_table_ops" + " should have only one table") + distributed_lookup_table_ops.append(op) + else: + if self.table_name is not None: + assert op.input("W")[0] != self.table_name - # step1 + self.has_distributed_lookup_table = len( + distributed_lookup_table_ops) > 0 + + # step1: For large parameters and gradients, split them into smaller + # blocks. param_list = [pg[0] for pg in params_grads] grad_list = [pg[1] for pg in params_grads] + + if self.has_distributed_lookup_table: + param_list = [ + param for param in param_list if param.name != self.table_name + ] + grad_list = [ + grad for grad in grad_list + if grad.name != framework.grad_var_name(self.table_name) + ] + self.table_param_grad = [ + param_grad for param_grad in params_grads + if param_grad[0].name == self.table_name + ][0] + table_grad_var = self.table_param_grad[1] + self.table_grad_list = [ + program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, trainer_id, index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) - # step2 + # step2: Create new vars for the parameters and gradients blocks and + # add ops to do the split. grad_var_mapping = self._append_split_op(program, grad_blocks) - # step3 + param_var_mapping = self._create_vars_from_blocklist(program, + param_blocks) + # step3: Add gradients as send op inputs and parameters as send + # op outputs. send_inputs = [] send_outputs = [] for b in grad_blocks: # append by order varname, block_id, _ = b.split(":") send_inputs.append(grad_var_mapping[varname][int(block_id)]) - - param_var_mapping = self._create_vars_from_blocklist(program, - param_blocks) for b in param_blocks: varname, block_id, _ = b.split(":") send_outputs.append(param_var_mapping[varname][int(block_id)]) @@ -225,7 +279,7 @@ class DistributeTranspiler: self.param_grad_ep_mapping[ep]["grads"].append(grad) rpc_client_var = program.global_block().create_var( - name="RPC_CLIENT_VAR", + name=RPC_CLIENT_VAR_NAME, persistable=True, type=core.VarDesc.VarType.RAW) @@ -237,7 +291,7 @@ class DistributeTranspiler: "RPCClient": rpc_client_var}, attrs={"endpoints": pserver_endpoints, "epmap": eplist}) - # step4 + # step4: Concat the parameters splits together after recv. 
for varname, splited_var in param_var_mapping.iteritems(): if len(splited_var) <= 1: continue @@ -248,23 +302,31 @@ class DistributeTranspiler: outputs={"Out": [orig_param]}, attrs={"axis": 0}) + if self.has_distributed_lookup_table: + self._replace_lookup_table_op_with_prefetch(program, rpc_client_var, + eplist) + self._split_table_grad_and_add_send_vars(program, rpc_client_var, + pserver_endpoints) + def get_trainer_program(self): # remove optimize ops and add a send op to main_program - self.program.global_block().delete_ops(self.optimize_ops) + self.origin_program.global_block().delete_ops(self.optimize_ops) + self.origin_program.sync_with_cpp() # FIXME(typhoonzero): serialize once will fix error occurs when clone. - self.program.__str__() - return self.program + self.origin_program.__str__() + return self.origin_program def get_pserver_program(self, endpoint): """ Get pserver side program using the endpoint. + TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers. NOTE: assume blocks of the same variable is not distributed on the same pserver, only change param/grad varnames for trainers to fetch. """ # step1 pserver_program = Program() - # step2 + # step2: Create vars to receive vars at parameter servers. recv_inputs = [] for v in self.param_grad_ep_mapping[endpoint]["params"]: self._clone_var(pserver_program.global_block(), v) @@ -273,32 +335,41 @@ class DistributeTranspiler: # we don't need to create them when grad arrives. # change client side var name to origin name by # removing ".trainer_%d" suffix + suff_idx = v.name.find(".trainer_") if suff_idx >= 0: orig_var_name = v.name[:suff_idx] - pserver_program.global_block().create_var( - name=orig_var_name, - persistable=True, - type=v.type, - dtype=v.dtype, - shape=v.shape) - for trainer_id in xrange(self.trainers): - var = pserver_program.global_block().create_var( - name="%s.trainer_%d" % (orig_var_name, trainer_id), - persistable=False, + else: + orig_var_name = v.name + # NOTE: single_trainer_var must be created for multi-trainer + # case to merge grads from multiple trainers + single_trainer_var = \ + pserver_program.global_block().create_var( + name=orig_var_name, + persistable=True, type=v.type, dtype=v.dtype, shape=v.shape) - recv_inputs.append(var) + if self.trainer_num > 1: + for trainer_id in xrange(self.trainer_num): + var = pserver_program.global_block().create_var( + name="%s.trainer_%d" % (orig_var_name, trainer_id), + persistable=False, + type=v.type, + dtype=v.dtype, + shape=v.shape) + recv_inputs.append(var) + else: + recv_inputs.append(single_trainer_var) # step3 optimize_block = pserver_program.create_block(0) # step 4 - # Create a union-find data struct from optimize ops, + # Create a union-find data structure from optimize ops, # If two ops are connected, we could add these two ops # into one set. ufind = self._create_ufind(self.optimize_ops) - # step 4.2 + # step 4.2 # Iterate through the ops and append optimize op which # located on current pserver opt_op_on_pserver = [] @@ -307,7 +378,7 @@ class DistributeTranspiler: opt_op_on_pserver.append(op) # step 4.3 # Iterate through the ops, and if an op and the optimize ops - # which located on current pserver are in one set, then + # which located on current pserver are in one set, then # append it into the sub program. 
# We try to put optimization program run parallelly, assume @@ -338,15 +409,24 @@ class DistributeTranspiler: else: self._append_pserver_non_opt_ops(block, op) + append_block = optimize_block + # append lr decay ops to the child block if exists + lr_ops = self._get_lr_ops() + if len(lr_ops) > 0: + for _, op in enumerate(lr_ops): + self._append_pserver_non_opt_ops(append_block, op) + + append_block = pserver_program.create_block(append_block.idx) + # append op to the current block - per_opt_block = optimize_block + per_opt_block = append_block for _, opt_op in enumerate(opt_op_on_pserver): for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and \ op not in global_ops: __append_optimize_op__(op, per_opt_block) - per_opt_block = pserver_program.create_block(0) + per_opt_block = pserver_program.create_block(append_block.idx) # append global ops for glb_op in global_ops: @@ -360,6 +440,23 @@ class DistributeTranspiler: # __append_optimize_op__(glb_op, optimize_block) # break + # process distributed lookup_table + prefetch_block = None + if self.has_distributed_lookup_table: + pserver_index = self.pserver_endpoints.index(endpoint) + self._create_table_optimize_block(pserver_index, pserver_program, + append_block) + prefetch_block = self._create_prefetch_block( + pserver_index, pserver_program, optimize_block) + + # NOTE: if has_distributed_lookup_table is False, then prefetch_block will + # not be executed, so it's safe to use optimize_block to hold the place + if self.has_distributed_lookup_table: + assert prefetch_block is not None + else: + assert prefetch_block is None + prefetch_block = pserver_program.global_block() + # step5 append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", @@ -368,8 +465,10 @@ class DistributeTranspiler: attrs={ "OptimizeBlock": optimize_block, "endpoint": endpoint, - "Fanin": self.trainers + "Fanin": self.trainer_num, + "PrefetchBlock": prefetch_block }) + pserver_program.sync_with_cpp() return pserver_program @@ -394,11 +493,7 @@ class DistributeTranspiler: pserver_vars = pserver_program.global_block().vars created_var_map = dict() for _, var in pserver_vars.iteritems(): - tmpvar = s_prog.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + tmpvar = s_prog.global_block().clone_variable(var) created_var_map[var.name] = tmpvar # 2. rename op outputs @@ -431,14 +526,207 @@ class DistributeTranspiler: attrs=op.attrs) return s_prog + # transpiler function for dis lookup_table + def _replace_lookup_table_op_with_prefetch(self, program, rpc_client_var, + eplist): + # 1. 
replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op + self.prefetch_input_vars = None + self.prefetch_output_vars = None + + continue_search_lookup_table_op = True + while continue_search_lookup_table_op: + continue_search_lookup_table_op = False + all_ops = program.global_block().ops + for op in all_ops: + if op.type == LOOKUP_TABLE_TYPE: + continue_search_lookup_table_op = True + + op_index = list(all_ops).index(op) + ids_name = op.input("Ids") + out_name = op.output("Out") + + if self.prefetch_input_vars is None: + ids_var = program.global_block().vars[ids_name[0]] + self.prefetch_input_vars = self.create_splited_vars( + source_var=ids_var, + block=program.global_block(), + tag="_prefetch_in_") + if self.prefetch_output_vars is None: + out_var = program.global_block().vars[out_name[0]] + self.prefetch_output_vars = self.create_splited_vars( + source_var=out_var, + block=program.global_block(), + tag="_prefetch_out_") + + # insert split_ids_op + program.global_block().insert_op( + index=op_index, + type="split_ids", + inputs={ + 'Ids': [ + program.global_block().vars[varname] + for varname in ids_name + ] + }, + outputs={"Out": self.prefetch_input_vars}) + + # insert prefetch_op + program.global_block().insert_op( + index=op_index + 1, + type="prefetch", + inputs={'X': self.prefetch_input_vars}, + outputs={ + "Out": self.prefetch_output_vars, + "RPCClient": rpc_client_var + }, + attrs={"epmap": eplist}) + + # insert concat_op + program.global_block().insert_op( + index=op_index + 2, + type="concat", + inputs={'X': self.prefetch_output_vars}, + outputs={ + "Out": [ + program.global_block().vars[varname] + for varname in out_name + ] + }, + attrs={"axis": 0}) + + # delete lookup_table_op + program.global_block().delete_ops([op]) + program.sync_with_cpp() + # break for loop + break + + def _split_table_grad_and_add_send_vars(self, program, rpc_client_var, + pserver_endpoints): + # 2. 
add split_ids_op and send_vars_op to send gradient to pservers + # there should only be one table_name + all_ops = program.global_block().ops + table_grad_name = framework.grad_var_name(self.table_name) + for op in all_ops: + if table_grad_name in op.output_arg_names: + op_index = list(all_ops).index(op) + # insert split_ids_op + program.global_block().insert_op( + index=op_index + 1, + type="split_ids", + inputs={ + 'Ids': [program.global_block().vars[table_grad_name]] + }, + outputs={"Out": self.table_grad_list}) + program.global_block().insert_op( + index=op_index + 2, + type="send_vars", + inputs={'X': self.table_grad_list}, + outputs={"RPCClient": rpc_client_var}, + attrs={"sync_send": True, + "epmap": pserver_endpoints}) + break + + def _create_prefetch_block(self, pserver_index, pserver_program, + optimize_block): + # STEP: create prefetch block + table_var = pserver_program.global_block().vars[self.table_name] + prefetch_block = pserver_program.create_block(optimize_block.idx) + trainer_ids = self.prefetch_input_vars[pserver_index] + pserver_ids = pserver_program.global_block().create_var( + name=trainer_ids.name, + type=trainer_ids.type, + shape=trainer_ids.shape, + dtype=trainer_ids.dtype) + trainer_out = self.prefetch_output_vars[pserver_index] + pserver_out = pserver_program.global_block().create_var( + name=trainer_out.name, + type=trainer_out.type, + shape=trainer_out.shape, + dtype=trainer_out.dtype) + prefetch_block.append_op( + type=LOOKUP_TABLE_TYPE, + inputs={'Ids': pserver_ids, + "W": table_var}, + outputs={"Out": pserver_out}, + attrs={ + "is_sparse": True, # has no effect on lookup_table op + "is_distributed": True, + "padding_idx": -1 + }) + return prefetch_block + + def _create_table_optimize_block(self, pserver_index, pserver_program, + append_block): + def _clone_var(block, var, persistable=True): + assert isinstance(var, Variable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + persistable=persistable) + + # STEP: create table optimize block + # create table param and grad var in pserver program + param_var = _clone_var( + pserver_program.global_block(), + self.origin_program.global_block().vars[self.table_name]) + grad_var = _clone_var( + pserver_program.global_block(), + self.origin_program.global_block().vars[framework.grad_var_name( + self.table_name)], + persistable=False) + + # create grad vars in pserver program + table_grad_var = self.table_param_grad[1] + table_grad_list = [ + pserver_program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, index, pserver_index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) for index in range(self.trainer_num) + ] + + # create table optimize block in pserver program + table_opt_op = [ + op for op in self.optimize_ops + if op.input("Param")[0] == self.table_name + ][0] + table_opt_block = pserver_program.create_block(append_block.idx) + # only support sgd now + assert table_opt_op.type == "sgd" + + # append sum op for table_grad_list + table_opt_block.append_op( + type="sum", + inputs={"X": table_grad_list}, + outputs={"Out": [grad_var]}) + + lr_var = pserver_program.global_block().vars[table_opt_op.input( + "LearningRate")[0]] + inputs = { + "Param": [param_var], + "Grad": [grad_var], + "LearningRate": [lr_var] + } + outputs = {"ParamOut": [param_var]} + table_opt_block.append_op( + type=table_opt_op.type, + inputs=inputs, + outputs=outputs, + attrs=table_opt_op.attrs) + # 
====================== private transpiler functions ===================== def _create_vars_from_blocklist(self, program, block_list, add_trainer_suffix=False): """ + Create vars for each split. NOTE: only grads need to be named for different trainers, use add_trainer_suffix to rename the grad vars. + :return: A dict mapping from original var name to each var split. """ block_map = dict() var_mapping = dict() @@ -490,7 +778,17 @@ class DistributeTranspiler: program.global_block().sync_with_cpp() return var_mapping - def _clone_var(self, block, var): + def create_splited_vars(self, source_var, block, tag): + return [ + block.create_var( + name=str(source_var.name + tag + str(index)), + type=source_var.type, + shape=source_var.shape, + dtype=source_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + + def _clone_var(self, block, var, persistable=True): assert isinstance(var, Variable) return block.create_var( name=var.name, @@ -498,12 +796,15 @@ class DistributeTranspiler: dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=True) + persistable=persistable) def _append_split_op(self, program, gradblocks): # Split variables that need to be split and append respective ops + add_suffix = False + if self.trainer_num > 1: + add_suffix = True var_mapping = self._create_vars_from_blocklist( - program, gradblocks, add_trainer_suffix=True) + program, gradblocks, add_trainer_suffix=add_suffix) for varname, splited_vars in var_mapping.iteritems(): # variable that don't need to split have empty splited_vars if len(splited_vars) <= 1: @@ -591,9 +892,9 @@ class DistributeTranspiler: return merged_var = \ pserver_block.vars[self._orig_varname(grad_block.name)] - if self.trainers > 1: + if self.trainer_num > 1: vars2merge = [] - for i in xrange(self.trainers): + for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ (self._orig_varname(grad_block.name), i) vars2merge.append(pserver_block.vars[per_trainer_name]) @@ -602,12 +903,13 @@ class DistributeTranspiler: type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) + # TODO(panyx0718): What if it's SELECTED_ROWS. if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: optimize_block.append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainers)}) + attrs={"scale": 1.0 / float(self.trainer_num)}) new_inputs[key] = merged_var elif key == "Param": # param is already created on global program @@ -625,7 +927,7 @@ class DistributeTranspiler: shape=param_block.shape) new_inputs[key] = tmpvar elif key == "LearningRate": - # leraning rate variable has already be created by non-optimize op, + # learning rate variable has already be created by non-optimize op, # don't create it once again. 
lr_varname = opt_op.input(key)[0] if pserver_block.vars.has_key(lr_varname): @@ -643,7 +945,7 @@ class DistributeTranspiler: new_shape = None if key in ["Param", "Grad", "LearningRate"]: continue - var = self.program.global_block().vars[opt_op.input(key)[0]] + var = self.origin_program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape param_shape = new_inputs["Param"].shape new_shape = self._get_optimizer_input_shape(opt_op.type, key, @@ -656,8 +958,8 @@ class DistributeTranspiler: new_inputs[key] = tmpvar # change output's ParamOut variable - outputs = self._get_output_map_from_op(self.program.global_block().vars, - opt_op) + outputs = self._get_output_map_from_op( + self.origin_program.global_block().vars, opt_op) outputs["ParamOut"] = new_inputs["Param"] optimize_block.append_op( @@ -669,8 +971,8 @@ class DistributeTranspiler: def _append_pserver_non_opt_ops(self, optimize_block, opt_op): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated - inputs = self._get_input_map_from_op(self.program.global_block().vars, - opt_op) + inputs = self._get_input_map_from_op( + self.origin_program.global_block().vars, opt_op) for varlist in inputs.itervalues(): if not isinstance(varlist, list): varlist = [varlist] @@ -683,19 +985,15 @@ class DistributeTranspiler: dtype=var.dtype, shape=var.shape) - outputs = self._get_output_map_from_op(self.program.global_block().vars, - opt_op) + outputs = self._get_output_map_from_op( + self.origin_program.global_block().vars, opt_op) for varlist in outputs.itervalues(): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: - program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + program.global_block().clone_variable(var) optimize_block.append_op( type=opt_op.type, @@ -743,7 +1041,7 @@ class DistributeTranspiler: def _is_opt_op(self, op): # NOTE: It's a HACK implement. - # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... + # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... if "Param" in op.input_names and \ "LearningRate" in op.input_names: return True @@ -761,9 +1059,9 @@ class DistributeTranspiler: if same_or_split_var(n, param) and n != param: return True return False - return False def _get_input_map_from_op(self, varmap, op): + """Returns a dict from op input name to the vars in varmap.""" iomap = dict() for key in op.input_names: vars = [] @@ -776,6 +1074,7 @@ class DistributeTranspiler: return iomap def _get_output_map_from_op(self, varmap, op): + """Returns a dict from op output name to the vars in varmap.""" iomap = dict() for key in op.output_names: vars = [] @@ -786,3 +1085,36 @@ class DistributeTranspiler: else: iomap[key] = vars return iomap + + def _get_lr_ops(self): + lr_ops = [] + # find learning rate variables by optimize op + lr_vars = set() + for op in self.optimize_ops: + if self._is_opt_op(op): + lr_vars.add(op.input("LearningRate")[0]) + + find_ops = [] + # find ops which output is lr var + block = self.origin_program.global_block() + for op in block.ops: + if set(op.output_arg_names) & lr_vars: + find_ops.append(op) + # make a union find struct by the ops in default_main_program + ufind = UnionFind(block.ops) + + for op1 in block.ops: + for op2 in block.ops: + # NOTE: we need to skip all optimize ops, since it is connected + # with forward/backward ops and lr ops, we only need the lr ops. 
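+                # Illustrative note (added for clarity): the lr ops collected
+                # here are the ops that produce the LearningRate variables,
+                # e.g. the ops emitted by a learning-rate decay schedule.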
+ if op1 != op2 and self._is_op_connected(op1, op2) and \ + not self._is_opt_op(op1) and not self._is_opt_op(op2): + ufind.union(op1, op2) + # find all ops which is related with lr var + for op1 in block.ops: + for op2 in find_ops: + if ufind.is_connected(op1, op2): + lr_ops.append(op1) + # we only need to append op for once + break + return lr_ops diff --git a/python/paddle/fluid/distributed_spliter.py b/python/paddle/fluid/distributed_splitter.py similarity index 78% rename from python/paddle/fluid/distributed_spliter.py rename to python/paddle/fluid/distributed_splitter.py index d288b27ba00970897d8121b82a9d51d5cf4ece09..060c1df8ad2badc5132f45ff0f44d136d828faa1 100644 --- a/python/paddle/fluid/distributed_spliter.py +++ b/python/paddle/fluid/distributed_splitter.py @@ -17,8 +17,10 @@ def hash_name(varlist, pserver_endpoints): """ hash variable names to several endpoints. - :param varlist: a list of Variables - :return: a map of pserver endpoint -> varname + Args: + varlist(list): a list of Variables + + Returns(dict): a map of pserver endpoint -> varname """ def _hash_block(block_str, total): @@ -34,9 +36,14 @@ def hash_name(varlist, pserver_endpoints): def round_robin(varlist, pserver_endpoints): """ - distribute variables to several endpoints. + Distribute variables to several endpoints. + Args: + varlist(list): a list of variables + pserver_endpoints(list): a list of pserver endpoints + + Returns(list[int]): the endpoint for each variable """ - assert (len(varlist) > len(pserver_endpoints)) + assert (len(varlist) >= len(pserver_endpoints)) eplist = [] pserver_idx = 0 diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index 19e5b61b0b32aba3fe1e7805704a3740e3854fc8..13475025b5c2a759779066f9d511ed8a786118d5 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings import numpy as np import layers @@ -59,6 +60,9 @@ class Evaluator(object): """ def __init__(self, name, **kwargs): + warnings.warn( + "The %s is deprecated, because maintain a modified program inside evaluator cause bug easily, please use fluid.metrics.%s instead." + % (self.__class__.__name__, self.__class__.__name__), Warning) self.states = [] self.metrics = [] self.helper = LayerHelper(name, **kwargs) diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 2612fb1ae41986ae0d5c6e942cc3accebcb00e19..54d0a12bcdbb1b6c13e584dd1a3a5d73cddd4af7 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -48,8 +48,7 @@ def as_numpy(tensor): assert isinstance(tensor, core.LoDTensor) lod = tensor.lod() if len(lod) > 0: - raise RuntimeError( - "Some of your featched tensors hold LoD information. \ + raise RuntimeError("Some of your fetched tensors hold LoD information. \ They can not be completely cast to Python ndarray. 
\ Please set the parameter 'return_numpy' as 'False' to \ return LoDTensor itself directly.") @@ -180,60 +179,24 @@ def get_program_cache_key(feed, fetch_list): class Executor(object): - def __init__(self, places): - if not isinstance(places, list) and not isinstance(places, tuple): - places = [places] - - act_places = [] - for each in places: - p = core.Place() - p.set_place(each) - act_places.append(p) - - # TODO(dzhwinter) : only use the first place - self.executor = core.Executor(act_places[0]) - self.places = places + def __init__(self, place): + self.place = place + p = core.Place() + p.set_place(place) + self.executor = core.Executor(p) self.program_caches = dict() - def aslodtensor(self, data): - def accumulate(data): - if not isinstance(data, list): - return 1 - return sum([accumulate(sub) for sub in data]) - - def parselod(data): - seq_lens = [accumulate(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - return lod - - assert len(self.places) != 0 - if not isinstance(data, list): - # pure tensor case - tensor = core.LoDTensor() - tensor.set(data, self.places[0]) - return tensor - else: - raise RuntimeError("Current implementation lacks unittests") - # lodtensor case - lod = [] - if not isinstance(data[0], list): - lod.append(parselod(data)) - flattened_data = np.concatenate(data, axis=0).astype("int64") - else: - while isinstance(data[0], list): - lod.append(parselod(seq)) - flattened_data = [item for seq in data for item in seq] - data = flattened_data - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - tensor = core.LoDTensor() - tensor.set(flattened_data, self.places[0]) - tensor.set_lod(lod) - return tensor + def as_lodtensor(self, data): + if isinstance(data, list): + raise RuntimeError("Some of your feed data hold LoD information. \ + They can not be completely cast from a list of Python \ + ndarray to LoDTensor. 
Please convert data to LoDTensor \ + directly before feeding the data.\ + ") + # single tensor case + tensor = core.LoDTensor() + tensor.set(data, self.place) + return tensor def _get_program_cache(self, program_cache_key): return self.program_caches.get(program_cache_key, None) @@ -293,7 +256,7 @@ class Executor(object): feed_target_name = op.desc.output('Out')[0] cur_feed = feed[feed_target_name] if not isinstance(cur_feed, core.LoDTensor): - cur_feed = self.aslodtensor(cur_feed) + cur_feed = self.as_lodtensor(cur_feed) idx = op.desc.attr('col') core.set_feed_variable(scope, cur_feed, feed_var_name, idx) else: diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 3e78788f470556d2196b5104f69a0a3285543ec4..4b841ef31dcb67ab660475cf6e231fd8a4ae83d6 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -640,12 +640,26 @@ class Operator(object): """ return self.desc.block_attr(name) + def all_attrs(self): + """ + Get the attribute dict + Returns(dict): The Operator's attribute dict + """ + attr_names = self.attr_names + attr_map = {} + for n in attr_names: + if n == 'sub_block': + attr_map[n] = self.block_attr(n) + else: + attr_map[n] = self.attr(n) + return attr_map + class Block(object): def __init__(self, program, idx): self.desc = program.desc.block(idx) self.vars = dict() # var_name --> var - self.ops = collections.deque() # operator list + self.ops = list() # operator list self.program = program self.removed_vars = dict() @@ -804,6 +818,11 @@ class Block(object): del self.vars[name] self.sync_with_cpp() + def remove_var(self, name): + self.sync_with_cpp() + self.desc.remove_var(name) + del self.vars[name] + def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() param = Parameter(global_block, *args, **kwargs) @@ -817,6 +836,18 @@ class Block(object): self.ops.append(op) return op + def insert_op(self, index, *args, **kwargs): + self.sync_with_cpp() + op_desc = self.desc.insert_op(index) + op = Operator(block=self, desc=op_desc, *args, **kwargs) + self.ops.insert(index, op) + return op + + def remove_op(self, index): + self.sync_with_cpp() + self.desc.remove_op(index, index + 1) + del self.ops[index] + def delete_ops(self, ops): # remove from cpp # FIXME(typhoonzero): remove only the first occurrence. @@ -825,20 +856,21 @@ class Block(object): end = list(self.ops).index(ops[-1]) except Exception, e: raise e + self.desc.remove_op(start, end + 1) def slice_ops(self, start, end): - return list(self.ops)[start:end] + return self.ops[start:end] def prepend_op(self, *args, **kwargs): op_desc = self.desc.prepend_op() op = Operator(self, op_desc, *args, **kwargs) - self.ops.appendleft(op) + self.ops.insert(0, op) return op def sync_with_cpp(self): """ - Sync with the desc on the c++ end. + Sync from the desc on the c++ end. This method is used to synchronize the c++ desc instance generated by backward. 
""" @@ -847,6 +879,11 @@ class Block(object): if not self.has_var(var.name()): self.create_var(name=var.name(), desc=var, type=var.type()) + # sync variables removed from c++ end + for var in self.vars.keys(): + if not self.desc.find_var(var): + self.vars.pop(var) + # sync operators from cpp ops_in_cpp = [] for op_idx in range(0, self.desc.op_size()): @@ -873,7 +910,7 @@ class Block(object): for index in range((start_index - 1 - 1), -1, -1): op_desc = ops_in_cpp[index] op = Operator(self, op_desc) - self.ops.appendleft(op) + self.ops.insert(0, op) # sync ops append to the end of cpp_ops for index in range((end_index + 1), len(ops_in_cpp)): @@ -881,6 +918,19 @@ class Block(object): op = Operator(self, op_desc) self.ops.append(op) + # sync ops removed from c++ end + if end_index != -1 and end_index < len(self.ops): + ops_in_cpp_index = 0 + ops_in_python_index = 0 + while ops_in_python_index < len( + self.ops) and ops_in_cpp_index < len(ops_in_cpp): + if self.ops[ops_in_python_index].desc != ops_in_cpp[ + ops_in_cpp_index]: + del self.ops[ops_in_python_index] + else: + ops_in_cpp_index += 1 + ops_in_python_index += 1 + assert len(self.ops) == len(ops_in_cpp) for index in range(len(self.ops)): assert self.ops[index].desc == ops_in_cpp[index] @@ -928,13 +978,27 @@ class Block(object): The new variable cloned from 'var' in current block. """ assert isinstance(var, Variable) - return self.create_var( - name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=True) + ret_var = None + # make STEP_SCOPES var can be safely cloned. + if var.type == core.VarDesc.VarType.STEP_SCOPES: + ret_var = self.create_var( + name=var.name, persistable=var.persistable, type=var.type) + elif var.type == core.VarDesc.VarType.SELECTED_ROWS: + ret_var = self.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + persistable=True) + else: + ret_var = self.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True) + return ret_var class Program(object): @@ -1055,24 +1119,6 @@ class Program(object): def current_block(self): return self.blocks[self.current_block_idx] - def append_backward(self, target, no_grad_set=None): - """ - return map(param_name -> (grad_name, block_index, op_index)) - """ - assert isinstance(target, Variable) - if no_grad_set is None: - no_grad_set = set() - try: - param_to_grad_info = self.desc.append_backward(target.desc, - no_grad_set) - except Exception as e: - raise core.EnforceNotMet( - str(e) + "\nCurrent protobuf is\n{0}".format( - self.to_string(False))) - - self.sync_with_cpp() - return param_to_grad_info - def create_block(self, parent_idx=None): new_block_idx = len(self.blocks) parent = self.current_block() if parent_idx is None else self.block( @@ -1137,6 +1183,8 @@ class Parameter(Variable): self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None) + self.do_model_average = kwargs.get('do_model_average', None) + def __str__(self): return self.to_string(True) @@ -1157,7 +1205,7 @@ class Parameter(Variable): if with_details: res_str = Variable.to_string(self, throw_on_error, True) additional_attr = ("trainable", "optimize_attr", "regularizer", - "gradient_clip_attr") + "gradient_clip_attr", "do_model_average") for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index 
b8d21344fc8f65f4025f28a195dab2d371b30292..125b4efa9d476e561bd78d0365cd92bbf7e66605 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -83,7 +83,7 @@ class Graph(object): file = open(dot_path, 'w') file.write(self.__str__()) image_path = os.path.join( - os.path.dirname(__file__), dot_path[:-3] + "pdf") + os.path.dirname(dot_path), dot_path[:-3] + "pdf") cmd = ["dot", "-Tpdf", dot_path, "-o", image_path] subprocess.Popen( cmd, @@ -199,7 +199,7 @@ class GraphPreviewGenerator(object): else: self.graph.show(path) - def add_param(self, name, data_type, shape, highlight=False): + def add_param(self, name, data_type, highlight=False): label = '\n'.join([ '<', ' ', @@ -214,11 +214,6 @@ class GraphPreviewGenerator(object): str(data_type), ' ' ' ', - ' ', - ' ' - ' ', '
', - '[%s]' % 'x'.join(shape), - '
>', ]) return self.graph.node( diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 927f1e625a579737b98e60683d8d9ed90d5e7e03..4e132ed26183eaa5e572128e679cdbffd42e5a42 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -18,7 +18,8 @@ import contextlib __all__ = [ 'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu', - 'init_on_cpu' + 'init_on_cpu', 'ConstantInitializer', 'UniformInitializer', + 'NormalInitializer', 'XavierInitializer' ] _force_init_on_cpu_ = False diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 1bb1aa30ee1019c6f80eb64b6dc20459e7a3073b..b9a53eda9144e9e56cf9bc626db40cf4225bd87f 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -18,6 +18,7 @@ from tensor import assign, fill_constant from .. import core from ..framework import Program, Variable, Operator from ..layer_helper import LayerHelper, unique_name +from ..initializer import force_init_on_cpu from ops import logical_and, logical_not, logical_or __all__ = [ @@ -949,7 +950,7 @@ def create_array(dtype): dtype=dtype) -def less_than(x, y, cond=None, **ignored): +def less_than(x, y, force_cpu=True, cond=None, **ignored): """ **Less than** @@ -958,6 +959,7 @@ def less_than(x, y, cond=None, **ignored): Args: x(Variable): First operand of *less_than* y(Variable): Second operand of *less_than* + force_cpu(Bool|True): The output data will be on CPU if set true. cond(Variable|None): Optional output variable to store the result of *less_than* Returns: @@ -974,8 +976,11 @@ def less_than(x, y, cond=None, **ignored): cond.stop_gradient = True helper.append_op( - type='less_than', inputs={'X': [x], - 'Y': [y]}, outputs={'Out': [cond]}) + type='less_than', + inputs={'X': [x], + 'Y': [y]}, + outputs={'Out': [cond]}, + attrs={'force_cpu': force_cpu or force_init_on_cpu()}) return cond @@ -1396,7 +1401,8 @@ class DynamicRNN(object): type='less_than', inputs={'X': self.step_idx, 'Y': self.max_seq_len}, - outputs={'Out': self.cond}) + outputs={'Out': self.cond}, + attrs={'force_cpu': True}) input_array = parent_block.create_var( name=unique_name.generate('dynamic_rnn_input_array'), @@ -1445,7 +1451,11 @@ class DynamicRNN(object): for new_mem, mem_array in self.mem_link: array_write(x=new_mem, i=self.step_idx, array=mem_array) - less_than(x=self.step_idx, y=self.max_seq_len, cond=self.cond) + less_than( + x=self.step_idx, + y=self.max_seq_len, + force_cpu=True, + cond=self.cond) self.status = DynamicRNN.AFTER_RNN for each_array in self.output_array: diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 3e649dc5fd32c4ed8fa6ad273b7be04d552b51ae..a5938fe494265778ef7032c56a8d6d35acd729c5 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -19,7 +19,6 @@ from layer_function_generator import generate_layer_fn from layer_function_generator import autodoc from ..layer_helper import LayerHelper import tensor -import ops import nn import math @@ -58,7 +57,7 @@ def detection_output(loc, This operation is to get the detection results by performing following two steps: - + 1. Decode input bounding box predictions according to the prior boxes. 2. Get the final detection results by applying multi-class non maximum suppression (NMS). 
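Those two steps can be sketched in a usage example (editor's illustration; the shapes and layer names are made up):

.. code-block:: python

    import paddle.fluid as fluid

    pb = fluid.layers.data(name='prior_box', shape=[20, 4],
                           append_batch_size=False, dtype='float32')
    pbv = fluid.layers.data(name='prior_box_var', shape=[20, 4],
                            append_batch_size=False, dtype='float32')
    loc = fluid.layers.data(name='target_box', shape=[20, 4], dtype='float32')
    scores = fluid.layers.data(name='scores', shape=[20, 21], dtype='float32')
    # decode the box predictions, then apply multi-class NMS
    nmsed_outs = fluid.layers.detection_output(
        scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)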
@@ -130,9 +129,9 @@ def detection_output(loc, target_box=loc, code_type='decode_center_size') old_shape = scores.shape - scores = ops.reshape(x=scores, shape=(-1, old_shape[-1])) + scores = nn.reshape(x=scores, shape=(-1, old_shape[-1])) scores = nn.softmax(input=scores) - scores = ops.reshape(x=scores, shape=old_shape) + scores = nn.reshape(x=scores, shape=old_shape) scores = nn.transpose(scores, perm=[0, 2, 1]) scores.stop_gradient = True nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) @@ -463,7 +462,7 @@ def ssd_loss(location, num, num_prior, num_class = confidence.shape def __reshape_to_2d(var): - return ops.reshape(x=var, shape=[-1, var.shape[-1]]) + return nn.reshape(x=var, shape=[-1, var.shape[-1]]) # 1. Find matched boundding box by prior box. # 1.1 Compute IOU similarity between ground-truth boxes and prior boxes. @@ -474,7 +473,7 @@ def ssd_loss(location, # 2. Compute confidence for mining hard examples # 2.1. Get the target label based on matched indices - gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, )) + gt_label = nn.reshape(x=gt_label, shape=gt_label.shape + (1, )) gt_label.stop_gradient = True target_label, _ = target_assign( gt_label, matched_indices, mismatch_value=background_label) @@ -487,7 +486,7 @@ def ssd_loss(location, conf_loss = nn.softmax_with_cross_entropy(confidence, target_label) # 3. Mining hard examples - conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior)) + conf_loss = nn.reshape(x=conf_loss, shape=(num, num_prior)) conf_loss.stop_gradient = True neg_indices = helper.create_tmp_variable(dtype='int32') dtype = matched_indices.dtype @@ -556,7 +555,7 @@ def ssd_loss(location, # 5.3 Compute overall weighted loss. loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss # reshape to [N, Np], N is the batch size and Np is the prior box number. 
- loss = ops.reshape(x=loss, shape=[-1, num_prior]) + loss = nn.reshape(x=loss, shape=[-1, num_prior]) loss = nn.reduce_sum(loss, dim=1, keep_dim=True) if normalize: normalizer = nn.reduce_sum(target_loc_weight) @@ -709,7 +708,7 @@ def multi_box_head(inputs, new_shape = [ -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)]) ] - out = ops.reshape(x=input, shape=new_shape) + out = nn.reshape(x=input, shape=new_shape) return out def _is_list_or_tuple_(data): @@ -803,7 +802,7 @@ def multi_box_head(inputs, mbox_loc.shape[0], mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3] / 4, 4 ] - mbox_loc_flatten = ops.reshape(mbox_loc, shape=new_shape) + mbox_loc_flatten = nn.reshape(mbox_loc, shape=new_shape) mbox_locs.append(mbox_loc_flatten) # get conf @@ -819,7 +818,7 @@ def multi_box_head(inputs, conf_loc.shape[0], conf_loc.shape[1] * conf_loc.shape[2] * conf_loc.shape[3] / num_classes, num_classes ] - conf_loc_flatten = ops.reshape(conf_loc, shape=new_shape) + conf_loc_flatten = nn.reshape(conf_loc, shape=new_shape) mbox_confs.append(conf_loc_flatten) if len(box_results) == 1: diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index bd7e9c30fed2c38a206bf17a646d8a4433af4099..e7d6c4e2521bee133c4794ed1db669b02fc2152b 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -21,8 +21,7 @@ from ..executor import global_scope __all__ = [ 'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file', - 'open_files', 'read_file', 'create_shuffle_reader', - 'create_double_buffer_reader', 'create_multi_pass_reader' + 'open_files', 'read_file', 'shuffle', 'double_buffer' ] @@ -237,13 +236,9 @@ def monkey_patch_reader_methods(reader): var = scope.find_var(reader.name) return var.get_reader() - def eof(): - return not __get_reader__().has_next() - def reset(): return __get_reader__().reset() - reader.eof = eof reader.reset = reset reader.stop_gradient = True reader.persistable = True @@ -255,10 +250,70 @@ def _copy_reader_var_(block, var): new_var.desc.set_shapes(var.desc.shapes()) new_var.desc.set_dtypes(var.desc.dtypes()) new_var.persistable = True - return monkey_patch_reader_methods(new_var) + return new_var + + +def _copy_reader_create_op_(block, op): + input_param_names = op.input_names + new_input_map = {} + for param_name in input_param_names: + new_input_map[param_name] = [] + arg_names = op.input(param_name) + for arg_name in arg_names: + new_input_map[param_name].append(block.var(arg_name)) + + output_param_names = op.output_names + new_output_map = {} + for param_name in output_param_names: + new_output_map[param_name] = [] + arg_names = op.output(param_name) + for arg_name in arg_names: + new_output_map[param_name].append(block.var(arg_name)) + + new_op = block.append_op( + type=op.type, + inputs=new_input_map, + outputs=new_output_map, + attrs=op.all_attrs()) + return new_op + + +def open_recordio_file(filename, + shapes, + lod_levels, + dtypes, + pass_num=1, + for_parallel=False): + """ + Open a RecordIO file + + This layer takes a RecordIO file to read from and returns a Reader Variable. + Via the Reader Variable, we can get data from the given RecordIO file. + + Args: + filename(str): The RecordIO file's name. + shapes(list): List of tuples which declaring data shapes. + lod_levels(list): List of ints which declaring data lod_level. + dtypes(list): List of strs which declaring data type. + pass_num(int): Number of passes to run. 
+ for_parallel(Bool): Set it as True if you are going to run + subsequent operators in parallel. + Returns: + Variable: A Reader Variable via which we can get RecordIO file data. + + Examples: + .. code-block:: python -def open_recordio_file(filename, shapes, lod_levels, dtypes): + reader = fluid.layers.io.open_recordio_file( + filename='./data.recordio', + shapes=[(3,224,224), (1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + + # Via the reader, we can use 'read_file' layer to get data: + image, label = fluid.layers.read_file(reader) + """ dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] @@ -283,11 +338,65 @@ def open_recordio_file(filename, shapes, lod_levels, dtypes): startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) + if pass_num > 1: + main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num) -def open_files(filenames, thread_num, shapes, lod_levels, dtypes): + if for_parallel: + main_prog_var = parallel(reader=main_prog_var) + + return monkey_patch_reader_methods(main_prog_var) + + +def open_files(filenames, + shapes, + lod_levels, + dtypes, + thread_num, + buffer_size=None, + pass_num=1, + for_parallel=False): + """ + Open files + + This layer takes a list of files to read from and returns a Reader Variable. + Via the Reader Variable, we can get data from given files. All files must + have name suffixs to indicate their formats, e.g., '*.recordio'. + + Args: + filenames(list): The list of file names. + shapes(list): List of tuples which declaring data shapes. + lod_levels(list): List of ints which declaring data lod_level. + dtypes(list): List of strs which declaring data type. + thread_num(int): The maximal concurrent prefetch thread number. + buffer_size(int): The size of prefetch buffer. + pass_num(int): Number of passes to run. + for_parallel(Bool): Set it as True if you are going to run + subsequent operators in parallel. + + Returns: + Variable: A Reader Variable via which we can get file data. + + Examples: + .. 
code-block:: python + + reader = fluid.layers.io.open_files(filenames=['./data1.recordio', + './data2.recordio'], + shapes=[(3,224,224), (1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + thread_num=2, + buffer_size=2) + + # Via the reader, we can use 'read_file' layer to get data: + image, label = fluid.layers.io.read_file(reader) + """ + if buffer_size is None: + buffer_size = thread_num + if isinstance(filenames, basestring): + filenames = [filenames] dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] @@ -296,57 +405,86 @@ def open_files(filenames, thread_num, shapes, lod_levels, dtypes): shape_concat.extend(shape) ranks.append(len(shape)) - var_name = unique_name('multiple_reader') - + multi_file_reader_name = unique_name('multi_file_reader') startup_blk = default_startup_program().current_block() - startup_var = startup_blk.create_var(name=var_name) + startup_reader = startup_blk.create_var(name=multi_file_reader_name) startup_blk.append_op( type='open_files', - outputs={'Out': [startup_var]}, + outputs={'Out': [startup_reader]}, attrs={ 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'ranks': ranks, 'file_names': filenames, - 'thread_num': thread_num + 'thread_num': thread_num, + 'buffer_size': buffer_size }) - startup_var.desc.set_dtypes(dtypes) - startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + startup_reader.desc.set_dtypes(dtypes) + startup_reader.persistable = True + main_prog_reader = _copy_reader_var_(default_main_program().current_block(), + startup_reader) + if pass_num > 1: + main_prog_reader = multi_pass( + reader=main_prog_reader, pass_num=pass_num) + + if for_parallel: + main_prog_reader = parallel(reader=main_prog_reader) + return monkey_patch_reader_methods(main_prog_reader) -def __create_decorated_reader__(op_type, reader, attrs): + +def __create_shared_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) - startup_blk.append_op( + startop_op = startup_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [startup_var]}, attrs=attrs) startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_block = default_main_program().current_block() + main_prog_var = _copy_reader_var_(main_prog_block, startup_var) + _copy_reader_create_op_(main_prog_block, startop_op) + return monkey_patch_reader_methods(main_prog_var) -def create_shuffle_reader(reader, buffer_size): - return __create_decorated_reader__('create_shuffle_reader', reader, - {'buffer_size': int(buffer_size)}) +def __create_unshared_decorated_reader__(op_type, reader, attrs): + new_reader_name = unique_name(op_type) + main_blk = default_main_program().current_block() + new_reader = main_blk.create_var(name=new_reader_name) + main_blk.append_op( + type=op_type, + inputs={'UnderlyingReader': reader}, + outputs={'Out': [new_reader]}, + attrs=attrs) + new_reader.persistable = True + new_reader.stop_gradient = True + return monkey_patch_reader_methods(new_reader) -def create_double_buffer_reader(reader, place=None): +def shuffle(reader, buffer_size): + return __create_unshared_decorated_reader__( + 'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)}) + + +def double_buffer(reader, place=None): attrs = dict() if place is not None: attrs['place'] = str(place).upper() - 
return __create_decorated_reader__('create_double_buffer_reader', reader,
-                                       attrs)
+    return __create_unshared_decorated_reader__('create_double_buffer_reader',
+                                                reader, attrs)
+
+
+def multi_pass(reader, pass_num):
+    return __create_shared_decorated_reader__(
+        'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
 
 
-def create_multi_pass_reader(reader, pass_num):
-    return __create_decorated_reader__('create_multi_pass_reader', reader,
-                                       {'pass_num': int(pass_num)})
+def parallel(reader):
+    return __create_shared_decorated_reader__('create_threaded_reader', reader,
+                                              {})
 
 
 def read_file(file_obj):
diff --git a/python/paddle/fluid/layers/metric.py b/python/paddle/fluid/layers/metric.py
index 3d9157ad4ef9381b70b4007c5bdca91f1482b427..f66dccfa2d040ea0a9d29daeaa1d2da640525959 100644
--- a/python/paddle/fluid/layers/metric.py
+++ b/python/paddle/fluid/layers/metric.py
@@ -15,12 +15,13 @@
 All layers just related to metric.
 """
 
+import warnings
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
 from ..framework import Variable
 from ..param_attr import ParamAttr
 
-__all__ = ['accuracy']
+__all__ = ['accuracy', 'auc']
 
 
 def accuracy(input, label, k=1, correct=None, total=None):
@@ -55,3 +56,37 @@ def accuracy(input, label, k=1, correct=None, total=None):
             "Total": [total],
         })
     return acc_out
+
+
+def auc(input, label, curve='ROC', num_thresholds=200):
+    warnings.warn(
+        "This interface is not recommended: fluid.layers.auc computes the \
+        AUC at every minibatch, but cannot aggregate those values into the \
+        AUC of a whole pass, because the pass AUC is not a weighted average \
+        of the minibatch AUC values. \
+        Please use fluid.metrics.Auc, which computes the AUC via Python \
+        natively and can provide both minibatch and pass AUC values.", Warning)
+    helper = LayerHelper("auc", **locals())
+    topk_out = helper.create_tmp_variable(dtype=input.dtype)
+    topk_indices = helper.create_tmp_variable(dtype="int64")
+    helper.append_op(
+        type="top_k",
+        inputs={"X": [input]},
+        outputs={"Out": [topk_out],
+                 "Indices": [topk_indices]},
+        attrs={"k": 1})
+    auc_out = helper.create_tmp_variable(dtype="float32")
+    helper.append_op(
+        type="auc",
+        inputs={
+            "Out": [topk_out],
+            "Indices": [topk_indices],
+            "Label": [label]
+        },
+        attrs={"curve": curve,
+               "num_thresholds": num_thresholds},
+        outputs={"AUC": [auc_out], })
+    return auc_out
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 2db4e5d27d40e902e2da17b3748c64a1eb3d052b..bba8b64bd88c3edc6eda110dde38c0ced50439f6 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -73,8 +73,11 @@ __all__ = [
     'smooth_l1',
     'one_hot',
     'autoincreased_step_counter',
+    'reshape',
     'lod_reset',
     'lrn',
+    'pad',
+    'label_smooth',
 ]
 
 
@@ -131,6 +134,8 @@ def fc(input,
     bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias of this layer.
         If it is set to None, no bias will be added to the output units.
     act (str, default None): Activation to be applied to the output of this layer.
+    use_mkldnn(bool): Use mkldnn kernel or not, it is valid only when the mkldnn
+        library is installed. Default: False
     name (str, default None): The name of this layer.
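Looking back at the reader decorators just above, the renamed wrappers compose by chaining (editor's sketch; the file names are made up):

.. code-block:: python

    import paddle.fluid as fluid

    reader = fluid.layers.io.open_files(
        filenames=['./data1.recordio', './data2.recordio'],
        shapes=[(3, 224, 224), (1, )],
        lod_levels=[0, 0],
        dtypes=['float32', 'int64'],
        thread_num=2)
    # shuffle/double_buffer now wrap an existing reader instead of creating one
    reader = fluid.layers.io.shuffle(reader, buffer_size=64)
    reader = fluid.layers.io.double_buffer(reader)
    image, label = fluid.layers.read_file(reader)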
Returns: @@ -151,43 +156,70 @@ def fc(input, dtype = helper.input_dtype() mul_results = [] - for input_var, param_attr in helper.iter_inputs_and_params(): - input_shape = input_var.shape + if use_mkldnn: + tmp = helper.create_tmp_variable(dtype) + input_shape = input.shape param_shape = [ reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + [size] w = helper.create_parameter( - attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + is_bias=False) + if bias_attr is None or bias_attr is False: + bias_attr = False + else: + bias_attr = True helper.append_op( - type="mul", - inputs={"X": input_var, - "Y": w}, + type="fc", + inputs={"Input": input, + "W": w}, outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1, - 'use_mkldnn': use_mkldnn - }) - mul_results.append(tmp) - - # sum - if len(mul_results) == 1: - pre_bias = mul_results[0] + attrs={"use_mkldnn": use_mkldnn, + "bias_attr": bias_attr}) + return helper.append_activation(tmp) else: - pre_bias = helper.create_tmp_variable(dtype) - helper.append_op( - type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) - # add bias - pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) - # add activation - return helper.append_activation(pre_activation) + for input_var, param_attr in helper.iter_inputs_and_params(): + input_shape = input_var.shape + param_shape = [ + reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) + ] + [size] + + w = helper.create_parameter( + attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) + tmp = helper.create_tmp_variable(dtype) + helper.append_op( + type="mul", + inputs={"X": input_var, + "Y": w}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1, + }) + mul_results.append(tmp) + + if len(mul_results) == 1: + pre_bias = mul_results[0] + else: + pre_bias = helper.create_tmp_variable(dtype) + helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}) + # add bias + pre_activation = helper.append_bias_op( + pre_bias, dim_start=num_flatten_dims) + # add activation + return helper.append_activation(pre_activation) def embedding(input, size, is_sparse=False, + is_distributed=False, padding_idx=None, param_attr=None, dtype='float32'): @@ -238,8 +270,11 @@ def embedding(input, inputs={'Ids': input, 'W': w}, outputs={'Out': tmp}, - attrs={'is_sparse': is_sparse, - 'padding_idx': padding_idx}) + attrs={ + 'is_sparse': is_sparse, + 'is_distributed': is_distributed, + 'padding_idx': padding_idx + }) return tmp @@ -1483,9 +1518,11 @@ def batch_norm(input, param_attr=None, bias_attr=None, data_layout='NCHW', + in_place=False, name=None, moving_mean_name=None, - moving_variance_name=None): + moving_variance_name=None, + do_model_average_for_mean_and_var=False): """ This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters. 
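A short usage sketch for the two new arguments (editor's example; the flag values are illustrative choices, not defaults mandated by the patch):

.. code-block:: python

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
    conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=3)
    bn = fluid.layers.batch_norm(
        input=conv,
        act='relu',
        in_place=True,  # reuse conv's output buffer for the result
        do_model_average_for_mean_and_var=False)  # keep stats out of averaging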
@@ -1516,7 +1553,10 @@ def batch_norm(input,
 
     mean = helper.create_parameter(
         attr=ParamAttr(
-            name=moving_mean_name, initializer=Constant(0.0), trainable=False),
+            name=moving_mean_name,
+            initializer=Constant(0.0),
+            trainable=False,
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     mean.stop_gradient = True
@@ -1525,7 +1565,8 @@ def batch_norm(input,
         attr=ParamAttr(
             name=moving_variance_name,
             initializer=Constant(1.0),
-            trainable=False),
+            trainable=False,
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     variance.stop_gradient = True
@@ -1538,7 +1579,7 @@ def batch_norm(input,
     saved_mean = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
     saved_variance = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
 
-    batch_norm_out = helper.create_tmp_variable(dtype)
+    batch_norm_out = input if in_place else helper.create_tmp_variable(dtype)
 
     helper.append_op(
         type="batch_norm",
@@ -3264,6 +3305,8 @@ def one_hot(input, depth):
         The one-hot tensor or LodTensor, same as input.
 
     Examples:
+        .. code-block:: python
+
         X is a LoDTensor:
           X.lod = [[0, 1, 4]]
           X.shape = [4, 1]
@@ -3318,6 +3361,102 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
     return counter
 
 
+def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
+    """
+    Gives a new shape to the input Tensor without changing its data.
+
+    The target shape can be given by :attr:`shape` or :attr:`actual_shape`.
+    :attr:`shape` is a list of integers while :attr:`actual_shape` is a tensor
+    variable. :attr:`actual_shape` has a higher priority than :attr:`shape`
+    if it is provided, while :attr:`shape` still should be set correctly to
+    guarantee shape inference in compile-time.
+
+    Some tricks exist when specifying the target shape.
+
+    1. -1 means the value of this dimension is inferred from the total element
+    number of x and remaining dimensions. Thus one and only one dimension can
+    be set -1.
+
+    2. 0 means the actual dimension value is going to be copied from the
+    corresponding dimension of x. The indices of 0s in shape can not exceed
+    Rank(X).
+
+    Here are some examples to explain it.
+
+    1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    is [6, 8], the reshape operator will transform x into a 2-D tensor with
+    shape [6, 8], leaving x's data unchanged.
+
+    2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    specified is [2, 3, -1, 2], the reshape operator will transform x into a
+    4-D tensor with shape [2, 3, 4, 2], leaving x's data unchanged. In this
+    case, one dimension of the target shape is set to -1, and the value of
+    this dimension is inferred from the total element number of x and the
+    remaining dimensions.
+
+    3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
+    with shape [2, 4, 3, 2], leaving x's data unchanged. In this case,
+    besides -1, 0 means the actual dimension value is going to be copied from
+    the corresponding dimension of x.
+
+    Args:
+        x(variable): The input tensor.
+        shape(list): The new shape. At most one dimension of the new shape can
+                     be -1.
+        actual_shape(variable): An optional input. If provided, reshape
+                                according to this given shape rather than
+                                :attr:`shape` specifying shape. That is to
+                                say :attr:`actual_shape` has a higher priority
+                                than :attr:`shape`.
+        act (str): The non-linear activation to be applied to output variable.
+        inplace(bool): If this flag is set true, the output shares data with
+                       the input x without copying; otherwise a new output
+                       tensor is created whose data is copied from input x.
+
+    Returns(variable): The output tensor.
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(
+                name='data', shape=[2, 4, 6], dtype='float32')
+            reshaped = fluid.layers.reshape(
+                x=data, shape=[-1, 0, 3, 2], act='tanh', inplace=True)
+    """
+
+    if not (isinstance(shape, list) or isinstance(shape, tuple)):
+        raise ValueError("Input shape must be a python list or tuple.")
+
+    # Validate the shape
+    unk_dim_idx = -1
+    for dim_idx, dim_size in enumerate(shape):
+        if dim_size == -1:
+            assert unk_dim_idx == -1, (
+                "Only one dimension in shape can be unknown.")
+            unk_dim_idx = dim_idx
+        elif dim_size == 0:
+            assert dim_idx < len(x.shape), (
+                "The indices of 0s in shape can not exceed Rank(X).")
+        else:
+            assert dim_size > 0, (
+                "Each dimension size given in shape must not be negative "
+                "except one unknown dimension.")
+
+    helper = LayerHelper("reshape", **locals())
+    reshaped = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type="reshape",
+        inputs={"X": x,
+                "Shape": actual_shape}
+        if isinstance(actual_shape, Variable) else {"X": x},
+        attrs={"shape": shape,
+               "inplace": inplace},
+        outputs={"Out": reshaped})
+
+    return helper.append_activation(reshaped)
+
+
 def lod_reset(x, y=None, target_lod=None):
     """
     LoD Reset Operator. Set LoD of **x** to a new one specified by **y** or
@@ -3481,3 +3620,127 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
                "beta": beta})
 
     return lrn_out
+
+
+def pad(x, paddings, pad_value=0., name=None):
+    """
+    Pads a tensor with a constant value given by :attr:`pad_value`, and the
+    padded width is specified by :attr:`paddings`.
+
+    Specifically, the number of values padded before the contents of :attr:`x`
+    in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the
+    number of values padded after the contents of :attr:`x` in dimension
+    :attr:`i` is indicated by :attr:`paddings[2*i+1]`.
+
+    See below for an example.
+
+    .. code-block:: text
+
+        Given:
+            x = [[1, 2], [3, 4]]
+
+            paddings = [0, 1, 1, 2]
+
+            pad_value = 0
+
+        Return:
+
+            out = [[0, 1, 2, 0, 0]
+                   [0, 3, 4, 0, 0]
+                   [0, 0, 0, 0, 0]]
+
+    Args:
+        x (Variable): The input tensor variable.
+        paddings (list): A list of integers. Its elements specify the padded
+                         width before and after for each dimension in turn.
+                         The length of :attr:`paddings` must be
+                         :math:`rank(x) \\times 2`.
+        pad_value (float): The constant value used to pad.
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+
+    Returns:
+        Variable: The padded tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a rank 2 tensor variable.
+            out = fluid.layers.pad(
+                x=x, paddings=[0, 1, 1, 2], pad_value=0.)
+    """
+    helper = LayerHelper('pad', input=x, **locals())
+    dtype = helper.input_dtype()
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type='pad',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'paddings': paddings,
+               'pad_value': float(pad_value)})
+    return out
+
+
+def label_smooth(label,
+                 prior_dist=None,
+                 epsilon=0.1,
+                 dtype="float32",
+                 name=None):
+    """
+    Label smoothing is a mechanism to regularize the classifier layer and is
+    called label-smoothing regularization (LSR).
+ + Label smoothing is proposed to encourage the model to be less confident, + since optimizing the log-likelihood of the correct label directly may + cause overfitting and reduce the ability of the model to adapt. Label + smoothing replaces the ground-truth label :math:`y` with the weighted sum + of itself and some fixed distribution :math:`\mu`. For class :math:`k`, + i.e. + + .. math:: + + \\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k, + + where :math:`1 - \epsilon` and :math:`\epsilon` are the weights + respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually + uniform distribution is used for :math:`\mu`. + + See more details about label smoothing in https://arxiv.org/abs/1512.00567. + + Args: + label(Variable): The input variable containing the label data. The + label data should use one-hot representation. + prior_dist(Variable): The prior distribution to be used to smooth + labels. If not provided, an uniform distribution + is used. The shape of :attr:`prior_dist` should + be :math:`(1, class\_num)`. + epsilon(float): The weight used to mix up the original ground-truth + distribution and the fixed distribution. + dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, + float_64, int etc. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The tensor variable containing the smoothed labels. + + Examples: + .. code-block:: python + + label = layers.data(name="label", shape=[1], dtype="float32") + one_hot_label = layers.one_hot(input=label, depth=10) + smooth_label = layers.label_smooth( + label=one_hot_label, epsilon=0.1, dtype="float32") + """ + if epsilon > 1. or epsilon < 0.: + raise ValueError("The value of epsilon must be between 0 and 1.") + helper = LayerHelper("label_smooth", **locals()) + label.stop_gradient = True + smooth_label = helper.create_tmp_variable(dtype) + helper.append_op( + type="label_smooth", + inputs={"X": label, + "PriorDist": prior_dist} if prior_dist else {"X": label}, + outputs={"Out": smooth_label}, + attrs={"epsilon": float(epsilon)}) + return smooth_label diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index f5c6b47d243dcf4ba985cfb41fc23b44d3ed809f..a9fe25744cc0b385479c9366af1b731ec221dd5a 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -25,6 +25,8 @@ __activations__ = [ 'abs', 'ceil', 'floor', + 'cos', + 'sin', 'round', 'reciprocal', 'log', @@ -47,7 +49,6 @@ __activations__ = [ __all__ = [ 'mean', 'mul', - 'reshape', 'scale', 'sigmoid_cross_entropy_with_logits', 'elementwise_add', diff --git a/python/paddle/fluid/memory_optimization_transpiler.py b/python/paddle/fluid/memory_optimization_transpiler.py index 41d1eca82e8b680977f44f1756c25c37340668a4..20ed19104207c1f0aa45db8f44570377011f3cde 100644 --- a/python/paddle/fluid/memory_optimization_transpiler.py +++ b/python/paddle/fluid/memory_optimization_transpiler.py @@ -29,17 +29,20 @@ dtype_to_size = { core.VarDesc.VarType.BOOL: 1 } -sub_block_ops = [ +SUB_BLOCK_OPS = [ "while", "while_grad", "parallel_do", "parallel_do_grad", "conditional_block", "conditional_block_grad" ] +SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"), + ("conditional_block", "conditional_block_grad")] + PRINT_LOG = False class ControlFlowGraph(object): - def __init__(self, Program, ops, forward_num, skip_opt): - self._program = Program + def __init__(self, program, ops, forward_num, skip_opt): + self._program = 
program self._ops = ops self._forward_num = forward_num self._successors = defaultdict(set) @@ -51,6 +54,7 @@ class ControlFlowGraph(object): self._skip_opt = skip_opt def _add_connections(self, connections): + """Populates _successors and _presuccessors for two neighbor nodes.""" for node1, node2 in connections: self._add(node1, node2) @@ -58,7 +62,11 @@ class ControlFlowGraph(object): self._successors[node1].add(node2) self._presuccessors[node2].add(node1) + # TODO(panyx0718): We need to have a unified way of building intermediate + # representation. def _build_graph(self): + """Build a graph based on op sequence. + """ self.op_size = len(self._ops) op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)] self._add_connections(op_node_connections) @@ -82,15 +90,14 @@ class ControlFlowGraph(object): self._live_out[i].add(new_name) def _reach_fixed_point(self, live_in, live_out): + """Check if the liveness set has stablized.""" if len(live_in) != len(self._live_in): return False if len(live_out) != len(self._live_out): return False for i in range(self.op_size): - if live_in[i] != self._live_in[i]: - return False - for i in range(self.op_size): - if live_out[i] != self._live_out[i]: + if (live_in[i] != self._live_in[i] or + live_out[i] != self._live_out[i]): return False return True @@ -98,6 +105,8 @@ class ControlFlowGraph(object): self._build_graph() live_in = defaultdict(set) live_out = defaultdict(set) + # Repeatedly apply liveness updates until the algorithm stablize + # on a complete set live input vars and live output vars. while True: for i in range(self.op_size, 0, -1): live_in[i] = set(self._live_in[i]) @@ -141,6 +150,8 @@ class ControlFlowGraph(object): return False return True + # TODO(panyx0718): This needs to be less hacky. It seems memory optimization + # doesn't consider vars copied between cpu and gpu. 
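The loop above is the textbook backward liveness analysis; a self-contained sketch of the same fixed-point iteration (editor's illustration with simplified names, not the class's actual code):

.. code-block:: python

    def liveness(n_ops, succ, use, defs):
        """live_in[i] = use[i] | (live_out[i] - defs[i]);
        live_out[i] is the union of live_in over the successors of i."""
        live_in = [set() for _ in range(n_ops)]
        live_out = [set() for _ in range(n_ops)]
        changed = True
        while changed:  # iterate until no set changes (the fixed point)
            changed = False
            for i in reversed(range(n_ops)):
                out = set()
                for s in succ.get(i, []):
                    out |= live_in[s]
                new_in = use[i] | (out - defs[i])
                if new_in != live_in[i] or out != live_out[i]:
                    live_in[i], live_out[i] = new_in, out
                    changed = True
        return live_in, live_out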
def _update_skip_opt_set(self): for i in range(self.op_size): op = self._ops[i] @@ -154,7 +165,7 @@ class ControlFlowGraph(object): bwd_id = 0 for i in range(self.op_size): op = self._ops[i] - if op.type() in sub_block_ops: + if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() is_forward = i < self._forward_num @@ -177,13 +188,15 @@ class ControlFlowGraph(object): def compare_shape(x_shape, cache_shape, opt_level): if opt_level == 0: return x_shape == cache_shape - if opt_level == 1: + elif opt_level == 1: if (x_shape[0] == -1) ^ (cache_shape[0] == -1): return False x_size = abs(reduce(lambda x, y: x * y, x_shape)) cache_size = abs(reduce(lambda x, y: x * y, cache_shape)) if x_size <= cache_size: return True + else: + raise ValueError("only support opt_level 0 or 1.") return False self._dataflow_analyze() @@ -191,10 +204,9 @@ class ControlFlowGraph(object): self.pool = [] for i in range(self.op_size): op = self._ops[i] - if op.type() in sub_block_ops: + if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() - self.current_block_desc = block_desc is_forward = i < self._forward_num if self.pool: defs_can_optimize = filter( @@ -211,37 +223,40 @@ class ControlFlowGraph(object): for index, cache_pair in enumerate(self.pool): cache_var = cache_pair[0] cache_shape = cache_pair[1] - if compare_shape(x_shape, cache_shape, level): - if self._has_var(block_desc, cache_var, is_forward): - x_dtype = self._find_var(block_desc, x, - is_forward).dtype() - cache_dtype = self._find_var( - block_desc, cache_var, is_forward).dtype() - # TODO(qijun): actually, we should compare dtype_to_size[x_dtype] - # and dtype_to_size[cache_dtype] - if x_dtype == cache_dtype: - if PRINT_LOG: - print( - ("Hit Cache !!!! cache pool index " - "is %d, var name is %s, " - "cached var name is %s, " - "var shape is %s ") % - (index, x, cache_var, - str(cache_shape))) - self.pool.pop(index) - if x == cache_var: - break - _rename_arg_( - self._ops, x, cache_var, begin_idx=i) - self._program.block(block_desc.id).var( - str(x)).desc = self._find_var( - block_desc, cache_var, is_forward) - self._update_graph( - x, cache_var, begin_idx=i) - break - - in_diff, out_diff = self._get_diff(self._live_in[i], - self._live_out[i]) + if not compare_shape(x_shape, cache_shape, level): + continue + + if not self._has_var(block_desc, cache_var, is_forward): + continue + + x_dtype = self._find_var(block_desc, x, + is_forward).dtype() + cache_dtype = self._find_var(block_desc, cache_var, + is_forward).dtype() + # TODO(qijun): actually, we should compare + # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype] + if x_dtype != cache_dtype: + continue + + if PRINT_LOG: + print(("Hit Cache !!!! cache pool index " + "is %d, var name is %s, " + "cached var name is %s, " + "var shape is %s ") % (index, x, cache_var, + str(cache_shape))) + self.pool.pop(index) + if x == cache_var: + break + # Rename the var to the cache var already with + # memory allocated in order to reuse the memory. + _rename_arg_(self._ops, x, cache_var, begin_idx=i) + self._program.block(block_desc.id).var(str( + x)).desc = self._find_var(block_desc, cache_var, + is_forward) + self._update_graph(x, cache_var, begin_idx=i) + break + + in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i]) can_optimize = filter( lambda x: self._check_var_validity(block_desc, x, is_forward), in_diff) @@ -252,6 +267,19 @@ class ControlFlowGraph(object): def _process_sub_block_pair(pdesc, sub_block_pair): + """Creates a list of tuple each of which tracks info of a subblock. 
+
+    Note: this function doesn't handle nested subblocks yet.
+    TODO(panyx0718): assert if case nested subblocks happen.
+
+    :param pdesc: ProgramDesc.
+    :param sub_block_pair: A list of op pairs. Each op pair is the forward
+        op and backward op. The ops in the list are special in that they
+        contain a subblock of ops.
+    :return: A list of tuples, each tuple is (all ops in a subblock pair
+        including forward and backward, number of forward ops,
+        all output args names of the ops in the subblock pairs).
+    """
     ops_list = []
     block_desc = pdesc.block(0)
     op_size = block_desc.op_size()
@@ -308,6 +336,11 @@ def _process_sub_block_pair(pdesc, sub_block_pair):
 
 
 def _get_cfgs(input_program):
+    """Process each block and create ControlFlowGraph for each of them.
+
+    :param input_program: Program object.
+    :return: A list of ControlFlowGraph, each corresponds to a block.
+    """
     ops_list = []
     pdesc = input_program.get_desc()
     block_desc = pdesc.block(0)
@@ -316,11 +349,8 @@ def _get_cfgs(input_program):
     ops_list.append(
         ([block_desc.op(i) for i in range(op_size)], op_size, set()))
 
-    sub_block_pair = [("while", "while_grad"), ("parallel_do",
-                                                "parallel_do_grad"),
-                      ("conditional_block", "conditional_block_grad")]
-
-    ops_list.extend(_process_sub_block_pair(pdesc, sub_block_pair))
+    # Only process one level of nested subblock.
+    ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR))
 
     cfgs = [
         ControlFlowGraph(input_program, ops, forward_num, skip_opt)
@@ -330,6 +360,17 @@ def _get_cfgs(input_program):
 
 
 def memory_optimize(input_program, print_log=False, level=0):
+    """Optimize memory by reusing var memory.
+
+    Note: it does not support a subblock nested inside another subblock.
+
+    :param input_program: Input Program.
+    :param print_log: whether to print debug log.
+    :param level: If level=0, reuse a cached var only when shapes are exactly
+        equal; if level=1, also reuse when the new var is no larger than the
+        cached one (and their batch dimensions agree on being -1).
+    :return: None. The input program is modified in place.
+    """
+    if level != 0 and level != 1:
+        raise ValueError("only support opt_level 0 or 1.")
     global PRINT_LOG
     PRINT_LOG = print_log
     cfgs = _get_cfgs(input_program)
diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..c618b02a768f2ca3e2b2914d8ee0134836d5c0d2
--- /dev/null
+++ b/python/paddle/fluid/metrics.py
@@ -0,0 +1,378 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fluid Metrics
+
+The metrics are accomplished via Python natively.
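A typical call site for the documented `memory_optimize`, sketched under the assumption that `memory_optimize` is exported at the `fluid` top level and the program's backward pass is already built:

.. code-block:: python

    import paddle.fluid as fluid

    # ... build the network and call append_backward / minimize first ...
    fluid.memory_optimize(fluid.default_main_program(),
                          print_log=False, level=0)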
+""" +import numpy as np +import copy +import warnings + +__all__ = [ + 'MetricBase', + 'CompositeMetric', + 'Accuracy', + 'ChunkEvaluator', + 'EditDistance', + 'DetectionMAP', + 'Auc', +] + + +def _is_numpy_(var): + return isinstance(var, (np.ndarray, np.generic)) + + +def _is_number_(var): + return isinstance(var, int) or isinstance(var, float) or (isinstance( + var, np.ndarray) and var.shape == (1, )) + + +def _is_number_or_matrix_(var): + return _is_number_(var) or isinstance(var, np.ndarray) + + +class MetricBase(object): + """ + Base Class for all evaluators + + Args: + name(str): The name of evaluator. such as, "accuracy". Used for generate + temporary variable name. + Interface: + Note(*) : the states is the attributes who not has _ prefix. + + get_config(): print current states and configuration + reset(): clear the states. If the Metrics states type is not (int, float, np.ndarray), + Please override this method. + update(): update states at every minibatch + eval(): get metric evaluation in numpy type. + """ + + def __init__(self, name, **kwargs): + self._name = str(name) if name != None else self.__class__.__name__ + self._kwargs = kwargs if kwargs != None else dict() + self.reset() + + def __str__(self): + return self._name + + def reset(self): + """ + states is the attributes who not has _ prefix. + reset the states of metrics. + """ + states = { + attr: value + for attr, value in self.__dict__.iteritems() + if not attr.startswith("_") + } + for attr, value in states.iteritems(): + if isinstance(value, int): + setattr(self, attr, 0) + elif isinstance(value, float): + setattr(self, attr, .0) + elif isinstance(value, (np.ndarray, np.generic)): + setattr(self, attr, np.zeros_like(value)) + else: + setattr(self, attr, None) + + def get_config(self): + states = { + attr: value + for attr, value in self.__dict__.iteritems() + if not attr.startswith("_") + } + config = copy.deepcopy(self._kwargs) + config.update({"name": self._name, "states": copy.deepcopy(states)}) + return config + + def update(self): + raise NotImplementedError() + + def eval(self): + raise NotImplementedError() + + +class CompositeMetric(MetricBase): + """ + Compute multiple metrics in each minibatch. + for example, merge F1, accuracy, recall into one Metric. + """ + + def __init__(self, name=None, **kwargs): + super(CompositeMetric, self).__init__(name, kwargs) + self._metrics = [] + + def add_metric(self, metric): + if not isinstance(metric, MetricBase): + raise ValueError("SubMetric should be inherit from MetricBase.") + self._metrics.append(metric) + + def eval(self): + ans = [] + for m in self._metrics: + ans.append(m.eval()) + return ans + + +class Accuracy(MetricBase): + """ + Accumulate the accuracy from minibatches and compute the average accuracy + for every pass. 
+ + Args: + name: the metrics name + + Example: + minibatch_accuracy = fluid.layers.accuracy(pred, label) + accuracy_evaluator = fluid.metrics.Accuracy() + for epoch in PASS_NUM: + accuracy_evaluator.reset() + for data in batches: + loss = exe.run(fetch_list=[cost, minibatch_accuracy]) + accuracy_evaluator.update(value=minibatch_accuracy, weight=batches) + accuracy = accuracy_evaluator.eval() + """ + + def __init__(self, name=None): + super(Accuracy, self).__init__(name) + self.value = .0 + self.weight = .0 + + def update(self, value, weight): + if not _is_number_or_matrix_(value): + raise ValueError( + "The 'value' must be a number(int, float) or a numpy ndarray.") + if not _is_number_(weight): + raise ValueError("The 'weight' must be a number(int, float).") + self.value += value * weight + self.weight += weight + + def eval(self): + if self.weight == 0: + raise ValueError( + "There is no data in Accuracy Metrics. Please check layers.accuracy output has added to Accuracy." + ) + return self.value / self.weight + + +class ChunkEvaluator(MetricBase): + """ + Accumulate counter numbers output by chunk_eval from mini-batches and + compute the precision recall and F1-score using the accumulated counter + numbers. + """ + + def __init__(self, name=None): + super(ChunkEvaluator, self).__init__(name) + self.num_infer_chunks = 0 + self.num_label_chunks = 0 + self.num_correct_chunks = 0 + + def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks): + if not _is_number_or_matrix_(num_infer_chunks): + raise ValueError( + "The 'num_infer_chunks' must be a number(int, float) or a numpy ndarray." + ) + if not _is_number_or_matrix_(num_label_chunks): + raise ValueError( + "The 'num_label_chunks' must be a number(int, float) or a numpy ndarray." + ) + if not _is_number_or_matrix_(num_correct_chunks): + raise ValueError( + "The 'num_correct_chunks' must be a number(int, float) or a numpy ndarray." + ) + self.num_infer_chunks += num_infer_chunks + self.num_label_chunks += num_label_chunks + self.num_correct_chunks += num_correct_chunks + + def eval(self): + precision = float( + self.num_correct_chunks + ) / self.num_infer_chunks if self.num_infer_chunks else 0 + recall = float(self.num_correct_chunks + ) / self.num_label_chunks if self.num_label_chunks else 0 + f1_score = float(2 * precision * recall) / ( + precision + recall) if self.num_correct_chunks else 0 + return precision, recall, f1_score + + +class EditDistance(MetricBase): + """ + Accumulate edit distance sum and sequence number from mini-batches and + compute the average edit_distance and instance error of all batches. + + Args: + name: the metrics name + + Example: + edit_distance_metrics = fluid.layers.edit_distance(input, label) + distance_evaluator = fluid.metrics.EditDistance() + for epoch in PASS_NUM: + distance_evaluator.reset() + for data in batches: + loss = exe.run(fetch_list=[cost] + list(edit_distance_metrics)) + distance_evaluator.update(*edit_distance_metrics) + distance, instance_error = distance_evaluator.eval() + + In the above example: + 'distance' is the average of the edit distance in a pass. + 'instance_error' is the instance error rate in a pass. 
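Concretely, for a batch of four sequences with edit distances 0, 2, 1 and 0 (made-up numbers):

.. code-block:: python

    import numpy as np

    distances = np.array([0, 2, 1, 0])
    avg_distance = np.sum(distances) / 4.0          # 0.75
    instance_error_rate = np.sum(distances != 0) / 4.0  # 0.5, two wrong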
+ + """ + + def __init__(self, name): + super(EditDistance, self).__init__(name) + self.total_distance = .0 + self.seq_num = 0 + self.instance_error = 0 + + def update(self, distances, seq_num): + if not _is_numpy_(distances): + raise ValueError("The 'distances' must be a numpy ndarray.") + if not _is_number_(seq_num): + raise ValueError("The 'seq_num' must be a number(int, float).") + seq_right_count = np.sum(distances == 0) + total_distance = np.sum(distances) + self.seq_num += seq_num + self.instance_error += seq_num - seq_right_count + self.total_distance += total_distance + + def eval(): + if self.seq_num == 0: + raise ValueError( + "There is no data in EditDistance Metric. Please check layers.edit_distance output has been added to EditDistance." + ) + avg_distance = self.total_distance / self.seq_num + avg_instance_error = self.instance_error / self.seq_num + return avg_distance, avg_instance_error + + +class DetectionMAP(MetricBase): + """ + Calculate the detection mean average precision (mAP). + + TODO (Dang Qingqing): update the following doc. + The general steps are as follows: + 1. calculate the true positive and false positive according to the input + of detection and labels. + 2. calculate mAP value, support two versions: '11 point' and 'integral'. + + Please get more information from the following articles: + https://sanchom.wordpress.com/tag/average-precision/ + https://arxiv.org/abs/1512.02325 + """ + + def __init__(self, name=None): + super(DetectionMAP, self).__init__(name) + # the current map value + self.value = .0 + + def update(self, value, weight): + if not _is_number_or_matrix_(value): + raise ValueError( + "The 'value' must be a number(int, float) or a numpy ndarray.") + if not _is_number_(weight): + raise ValueError("The 'weight' must be a number(int, float).") + self.value += value + self.weight += weight + + def eval(self): + if self.weight == 0: + raise ValueError( + "There is no data in DetectionMAP Metrics. " + "Please check layers.detection_map output has added to DetectionMAP." + ) + return self.value / self.weight + + +class Auc(MetricBase): + """ + Auc Metrics which adapts to binary classification. + Need to note that auc metrics compute the value via Python natively. + If you concern the speed, please use the fluid.layers.auc instead. + + The `auc` function creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the AUC. To discretize the AUC curve, a linearly spaced set of + thresholds is used to compute pairs of recall and precision values. The area + under the ROC-curve is therefore computed using the height of the recall + values by the false positive rate, while the area under the PR-curve is the + computed using the height of the precision values by the recall. + + Args: + name: metric name + curve: Specifies the name of the curve to be computed, 'ROC' [default] or + 'PR' for the Precision-Recall-curve. + num_thresholds: The number of thresholds to use when discretizing the roc + curve. + + "NOTE: only implement the ROC curve type via Python now." 
+ """ + + def __init__(self, name, curve='ROC', num_thresholds=200): + super(MetricBase, self).__init__(name, curve, num_thresholds) + self._curve = curve + self._num_thresholds = num_thresholds + self._epsilon = 1e-6 + self.tp_list = np.ndarray((num_thresholds, )) + self.fn_list = np.ndarray((num_thresholds, )) + self.tn_list = np.ndarray((num_thresholds, )) + self.fp_list = np.ndarray((num_thresholds, )) + + def update(self, labels, predictions, axis=1): + if not _is_numpy_(labels): + raise ValueError("The 'labels' must be a numpy ndarray.") + if not _is_numpy_(predictions): + raise ValueError("The 'predictions' must be a numpy ndarray.") + + kepsilon = 1e-7 # to account for floating point imprecisions + thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) + for i in range(num_thresholds - 2)] + thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] + + # caculate TP, FN, TN, FP count + for idx_thresh, thresh in enumerate(thresholds): + tp, fn, tn, fp = 0, 0, 0, 0 + for i, lbl in enumerate(labels): + if lbl: + if predictions[i, 0] >= thresh: + tp += 1 + else: + fn += 1 + else: + if predictions[i, 0] >= thresh: + fp += 1 + else: + tn += 1 + tp_list[idx_thresh] += tp + fn_list[idx_thresh] += fn + tn_list[idx_thresh] += tn + fp_list[idx_thresh] += fp + + def eval(self): + epsilon = self._epsilon + num_thresholds = self._num_thresholds + tpr = (tp_list.astype("float32") + epsilon) / ( + tp_list + fn_list + epsilon) + fpr = fp_list.astype("float32") / (fp_list + tn_list + epsilon) + rec = (tp_list.astype("float32") + epsilon) / ( + tp_list + fp_list + epsilon) + + x = fpr[:num_thresholds - 1] - fpr[1:] + y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0 + auc_value = np.sum(x * y) + return auc_value diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 3b2e1a3073251a6d6460450dc957e1b5c7a873c5..bbedf6fde0872fd32d81c103bf5fe61449b7f57b 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -98,7 +98,7 @@ def img_conv_group(input, use_mkldnn=use_mkldnn) if conv_with_batchnorm[i]: - tmp = layers.batch_norm(input=tmp, act=conv_act) + tmp = layers.batch_norm(input=tmp, act=conv_act, in_place=True) drop_rate = conv_batchnorm_drop_rate[i] if abs(drop_rate) > 1e-5: tmp = layers.dropout(x=tmp, dropout_prob=drop_rate) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 180575c35dc6e115e11cccf9fff9fb2d3cd7e9a6..36503cac6d5391821b977d90e6b77c4df7e3b564 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import re from collections import defaultdict from paddle.fluid.framework import Program import framework @@ -818,8 +818,8 @@ class ModelAverage(Optimizer): min_average_window, max_average_window and current update times. Args: - params_grads: A list of parameter-grad variable pairs. average_window_rate: The rate of average window. + params_grads: A list of parameter-grad variable pairs. min_average_window: The minimum size of average window. max_average_window: The maximum size of average window. 
@@ -840,8 +840,8 @@ class ModelAverage(Optimizer): """ def __init__(self, - params_grads, average_window_rate, + params_grads=None, min_average_window=10000, max_average_window=10000, **kwargs): @@ -849,24 +849,37 @@ class ModelAverage(Optimizer): self.average_window = average_window_rate self.min_average_window = min_average_window self.max_average_window = max_average_window - self.params_grads = params_grads + + self.params_grads = [] if params_grads is None else params_grads + params = {} + for param, grad in self.params_grads: + if param.do_model_average != False: + params[param.name] = (param, grad) + for param in framework.default_main_program().global_block( + ).all_parameters(): + if param.name not in params and param.do_model_average != False: + grad = param.block.create_var( + name=unique_name.generate(".".join([param.name, 'tmp'])), + dtype=param.dtype, + persistable=False, + stop_gradient=True) + params[param.name] = (param, grad) + self.params_grads = params.values() + for param, grad in self.params_grads: - if grad is not None: - self._append_average_accumulate_op(param) + self._append_average_accumulate_op(param) self.apply_program = Program() block = self.apply_program.global_block() with program_guard(main_program=self.apply_program): for param_grad in self.params_grads: - if param_grad[1] is not None: - self._add_average_apply_op(block, param_grad) + self._add_average_apply_op(block, param_grad) self.restore_program = Program() block = self.restore_program.global_block() with program_guard(main_program=self.restore_program): for param_grad in self.params_grads: - if param_grad[1] is not None: - self._add_average_restore_op(block, param_grad) + self._add_average_restore_op(block, param_grad) def _add_average_apply_op(self, block, param_grad): param = block.clone_variable(param_grad[0]) diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..5ce2aa1fc4d0b275b502af0f97e4a0f83e85de5b --- /dev/null +++ b/python/paddle/fluid/parallel_executor.py @@ -0,0 +1,150 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import core +import multiprocessing +import framework +import executor + +__all__ = ['ParallelExecutor'] + + +class ParallelExecutor(object): + def __init__(self, + use_cuda, + loss_name=None, + main_program=None, + num_threads=None, + allow_op_delay=False, + share_vars_from=None): + """ + ParallelExecutor can run program in parallel. + + Args: + use_cuda(bool): Whether to use CUDA or not. + loss_name(str, default None): The loss name must set in training. + main_program(Program, default None): The program that need to run, + if not provided, then default_main_program will be used. + num_threads(int, default None): How many threads are used for + training. 
+            allow_op_delay(bool, default False): Whether to delay and buffer
+                some operators together for scheduling or not, which may
+                improve performance in some cases, default False.
+            share_vars_from(ParallelExecutor, default None): If provided,
+                it will share variables from the specified ParallelExecutor.
+
+        Returns:
+            A ParallelExecutor object.
+
+        Raises:
+            TypeError: If share_vars_from is provided, but not ParallelExecutor
+                object.
+
+        Examples:
+            .. code-block:: python
+
+              train_exe = fluid.ParallelExecutor(
+                  use_cuda=True, loss_name=loss.name)
+              test_exe = fluid.ParallelExecutor(
+                  use_cuda=True,
+                  main_program=test_program,
+                  share_vars_from=train_exe)
+
+              train_loss, = train_exe.run([loss.name], feed_dict=feed_dict)
+              test_loss, = test_exe.run([loss.name], feed_dict=feed_dict)
+        """
+
+        self._places = []
+        self._act_places = []
+        if use_cuda:
+            for i in xrange(core.get_cuda_device_count()):
+                p = core.Place()
+                self._act_places.append(core.CUDAPlace(i))
+                p.set_place(self._act_places[-1])
+                self._places.append(p)
+        else:
+            for i in xrange(multiprocessing.cpu_count()):
+                p = core.Place()
+                # CPUPlace takes no device id; all workers share the host.
+                self._act_places.append(core.CPUPlace())
+                p.set_place(self._act_places[-1])
+                self._places.append(p)
+        assert self._places, "no place for execution"
+
+        if num_threads is None:
+            if use_cuda:
+                # Experiments on se-resnext show that too many threads hurt
+                # performance. Worth tuning for other models in the future.
+                num_threads = len(self._places)
+            else:
+                num_threads = min(
+                    len(self._places) * 2, multiprocessing.cpu_count())
+
+        main = main_program
+        main = main if main else framework.default_main_program()
+        scope = executor.global_scope()
+
+        if share_vars_from and not isinstance(share_vars_from,
+                                              ParallelExecutor):
+            raise TypeError("share_vars_from must be ParallelExecutor.")
+        local_scopes = share_vars_from.executor.local_scopes(
+        ) if share_vars_from else []
+
+        self.persistable_vars = [
+            v.name
+            for v in filter(lambda var: \
+                var.persistable and var.type != core.VarDesc.VarType.RAW,
+                main.list_vars())
+        ]
+
+        self.executor = core.ParallelExecutor(
+            num_threads,
+            True if use_cuda else False,  # use_event
+            self._places,
+            set([
+                p.name for p in main.global_block().iter_parameters()
+                if not p.stop_gradient
+            ]),
+            set(self.persistable_vars),
+            main.desc,
+            loss_name if loss_name else '',
+            scope,
+            local_scopes,
+            allow_op_delay)
+        self.scope = scope
+
+    def run(self, fetch_list, feed_dict={}):
+        """
+        :param fetch_list: A list of variable names that will be fetched.
+        :param feed_dict: A dict mapping for feed variable name to LoDTensor
+            or numpy array.
+        :return: fetched value list.
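Continuing the class docstring example above, a numpy array in feed_dict is wrapped into a LoDTensor on the first place automatically (sketch; `train_exe`, `loss` and the feed name 'x' come from that hypothetical setup):

.. code-block:: python

    import numpy as np

    x = np.random.random((32, 784)).astype('float32')
    loss_val, = train_exe.run([loss.name], feed_dict={'x': x})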
+ """ + if not isinstance(feed_dict, dict): + raise TypeError("feed_dict should be a dict") + + feed_tensor_dict = {} + for i, feed_name in enumerate(feed_dict): + feed_tensor = feed_dict[feed_name] + if not isinstance(feed_tensor, core.LoDTensor): + feed_tensor = core.LoDTensor() + feed_tensor.set(feed_dict[feed_name], self._act_places[0]) + feed_tensor_dict[feed_name] = feed_tensor + + fetch_var_name = '@FETCHED_VAR_NAME@' + self.executor.run(fetch_list, fetch_var_name, feed_tensor_dict) + arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array() + return [arr[i] for i in range(len(arr))] + + def bcast_params(self): + self.executor.bcast_params(set(self.persistable_vars)) diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index 255cd2104325afa31449cbd3875499a7c5d7f572..1c6970441bccdc1c1221503256c30c83502bd123 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -28,13 +28,15 @@ class ParamAttr(object): learning_rate=1.0, regularizer=None, trainable=True, - gradient_clip=None): + gradient_clip=None, + do_model_average=None): self.name = name self.initializer = initializer self.learning_rate = learning_rate self.regularizer = regularizer self.trainable = trainable self.gradient_clip = gradient_clip + self.model_average = do_model_average def set_default_initializer(self, initializer): if initializer is None: @@ -80,7 +82,8 @@ class ParamAttr(object): }, 'regularizer': self.regularizer, 'trainable': self.trainable, - 'gradient_clip_attr': self.gradient_clip + 'gradient_clip_attr': self.gradient_clip, + 'model_average': self.model_average } if with_initializer: kwargs['initializer'] = self.initializer @@ -90,7 +93,7 @@ class ParamAttr(object): class WeightNormParamAttr(ParamAttr): """ Used for weight normalization. Any field in ParamAttr can also be set here. - Besides, an extra field dim can be set to indicate the dimension except + Besides, an extra field dim can be set to indicate the dimension except which to normalize. """ # List to record the parameters reparameterized by weight normalization. diff --git a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py index 983f8f4dbeac83566839de25ec9765eb248be768..ce640dece8a5067bd10f410a2bb58874b7cc0908 100644 --- a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -13,7 +13,7 @@ # limitations under the License. import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 93ef66851b0efd65361122853dadeefe11992ed5..6dfc2997ae0328a41fe22d13dfa8fc51d4d021a6 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
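The ModelAverage and ParamAttr changes above are meant to work together: with params_grads left as None, ModelAverage now walks every parameter of the default main program and skips those whose ParamAttr was created with do_model_average=False (assuming the framework forwards that flag onto the Parameter, as the optimizer code expects). A hedged sketch; the fc layer and hyper-parameters are illustrative, not taken from this patch.

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
# This weight opts out of averaging through the new ParamAttr field.
y_pred = fluid.layers.fc(
    input=x, size=1,
    param_attr=fluid.ParamAttr(name='fc_w', do_model_average=False))
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=y_pred, label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

# params_grads defaults to None, so every remaining parameter in the default
# main program is accumulated; 'fc_w' is excluded by the flag above.
model_average = fluid.optimizer.ModelAverage(
    average_window_rate=0.15, min_average_window=100, max_average_window=10000)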
-import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import contextlib import numpy diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index b01c1875d64d7fc14e0141672f7e8eab2b6a0394..e8bb082be196b6342b1719235f1264bbe3d776ac 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -14,7 +14,7 @@ from __future__ import print_function -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import contextlib import math diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index f488527e0bc69059bc44422aa28188441f3d5b54..d9cd76952e31f8185512ab45f9f3ab2ce7d9da48 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import math - import numpy as np -import paddle.v2 as paddle -import paddle.v2.dataset.conll05 as conll05 -import paddle.fluid as fluid -from paddle.fluid.initializer import init_on_cpu -import contextlib +import os import time import unittest -import os + +import paddle +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) @@ -37,7 +36,7 @@ depth = 8 mix_hidden_lr = 1e-3 IS_SPARSE = True -PASS_NUM = 10 +PASS_NUM = 100 BATCH_SIZE = 10 embedding_name = 'emb' @@ -77,7 +76,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, emb_layers.append(mark_embedding) hidden_0_layers = [ - fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers ] hidden_0 = fluid.layers.sums(input=hidden_0_layers) @@ -94,8 +94,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, for i in range(1, depth): mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim) + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') ]) lstm = fluid.layers.dynamic_lstm( @@ -109,8 +109,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, input_tmp = [mix_hidden, lstm] feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len) + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') ]) return feature_out @@ -171,7 +171,7 @@ def train(use_cuda, save_dirname=None, is_local=True): # check other optimizers and check why out will be NAN sgd_optimizer = fluid.optimizer.SGD( learning_rate=fluid.layers.exponential_decay( - learning_rate=0.0001, + learning_rate=0.01, decay_steps=100000, decay_rate=0.5, staircase=True)) @@ -233,7 +233,7 @@ def train(use_cuda, save_dirname=None, is_local=True): print("second per batch: " + str((time.time( ) - start_time) / batch_id)) # Set the threshold low to speed up the CI test - if float(pass_precision) > 0.05: + if float(pass_precision) > 0.01: if save_dirname is not None: # 
TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index 3a1a0859ecfd4ac5337e2112f8b22e32d8474f22..830d78df8b9e56b45f7e928562ef4b89e88f696d 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -14,7 +14,7 @@ import contextlib import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as pd diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index e85b97a7f430b6d752baa179f27a7d15bc4d9a81..5ec6890c1b0dabd2804a92071b63c9610299e67c 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -14,7 +14,7 @@ from __future__ import print_function import argparse import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle import sys import numpy import unittest @@ -157,7 +157,6 @@ def train(nn_type, for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - pserver_endpoints = os.getenv("PSERVERS") trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 2ce66d32c993672793b0db213267d1f80b5c49dd..2172c275b8082689a6ff5f2c3c27a2ff4e92275a 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -16,7 +16,7 @@ import math import sys import os import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as layers diff --git a/python/paddle/fluid/tests/book/test_understand_sentiment.py b/python/paddle/fluid/tests/book/test_understand_sentiment.py index d2f3f7404697feb0768f873070b97aeb3ba0cd64..dedd153778d7ad9caeb5fa7090a980bc7f177dea 100644 --- a/python/paddle/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/test_understand_sentiment.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle import contextlib import math import numpy as np diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 26b97c3e254f54b83515436660e44d4908c98fbe..8929779de9448d036e1528b64330b37463ab3988 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
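In the test_recognize_digits change above, dropping the second assignment means the endpoint string built from the IP list is no longer thrown away. A small sketch with made-up values (the environment variables that feed pserver_ips and port are outside this hunk and assumed):

# Hypothetical values; in the test they come from environment variables.
pserver_ips = "192.168.0.10,192.168.0.11"
port = "6174"

eplist = [":".join([ip, port]) for ip in pserver_ips.split(",")]
pserver_endpoints = ",".join(eplist)  # "ip:port,ip:port"
# Before this patch, pserver_endpoints = os.getenv("PSERVERS") then overwrote
# the value just computed above.
assert pserver_endpoints == "192.168.0.10:6174,192.168.0.11:6174"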
-import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import unittest import os diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index ad79e96b958b36a06c8a3cc990dbe3608e32c9ac..8818cf96fa8f08036f9e23aae786f67b5614b2b9 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -13,7 +13,7 @@ # limitations under the License. import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import math import sys diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index 204669d7e6176e9e8250e8aebc2d10441fa24b67..dfebb9a06ea4f290f128c486dcaccaeccdcef8c4 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -16,7 +16,7 @@ from __future__ import print_function import sys -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import math import sys diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index a24834a6f0b19d1265f6c8d7089d31583af82d1f..a1ca6d981fafb401985d03e9f2d63d1cb41b21b5 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -13,7 +13,7 @@ # limitations under the License. import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework diff --git a/python/paddle/fluid/tests/demo/fc_gan.py b/python/paddle/fluid/tests/demo/fc_gan.py index 7452ea2a34aa0c75d8e0990639b29705033af98b..8ea1b2b15cc0c0eb5bca67a9c5a6ac6c6774e7e2 100644 --- a/python/paddle/fluid/tests/demo/fc_gan.py +++ b/python/paddle/fluid/tests/demo/fc_gan.py @@ -19,7 +19,7 @@ import os import matplotlib import numpy -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid matplotlib.use('Agg') diff --git a/python/paddle/fluid/tests/test_cpp_reader.py b/python/paddle/fluid/tests/test_cpp_reader.py index 4b0d039b7e05a55980946a8949e32802e9e57c20..e54c73b2956dd99ee57804318130c261e133d21a 100644 --- a/python/paddle/fluid/tests/test_cpp_reader.py +++ b/python/paddle/fluid/tests/test_cpp_reader.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import numpy as np import sys diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index b2fd5ae29c724da52df0a5d3cb56d2ec9e5530f3..89f4c64975802dc1827ec17ed3626b91e36d6971 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -14,7 +14,7 @@ from __future__ import print_function import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid BATCH_SIZE = 128 diff --git a/python/paddle/fluid/tests/test_gradient_clip.py b/python/paddle/fluid/tests/test_gradient_clip.py index 68b682f68b1fd147b821cfdb1e0866cf8aa04bff..d530601f13be6810a8a99b13c92faf584df568f9 100644 --- a/python/paddle/fluid/tests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/test_gradient_clip.py @@ -13,7 +13,7 @@ # limitations under the License. import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid BATCH_SIZE = 128 diff --git a/python/paddle/fluid/tests/test_mnist_if_else_op.py b/python/paddle/fluid/tests/test_mnist_if_else_op.py index 94395f6cfb4648967558ed265e798e3505c20fc1..d34f52db5ffc889f17513d034ad2c99f696b0cdf 100644 --- a/python/paddle/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/fluid/tests/test_mnist_if_else_op.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +import paddle import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program from paddle.fluid.executor import Executor from paddle.fluid.optimizer import MomentumOptimizer import paddle.fluid.core as core -import paddle.v2 as paddle import unittest import numpy as np diff --git a/python/paddle/fluid/tests/unittests/.gitignore b/python/paddle/fluid/tests/unittests/.gitignore index ad02bdecf436bba925e2e3b7efb20c878df70dfd..3538a9c2009bb133609153427981fb66974377fa 100644 --- a/python/paddle/fluid/tests/unittests/.gitignore +++ b/python/paddle/fluid/tests/unittests/.gitignore @@ -2,3 +2,5 @@ mnist.recordio mnist_0.recordio mnist_1.recordio mnist_2.recordio +flowers.recordio +wmt16.recordio diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 0ad273c7161977e18f91f952fd3a9dc144bf73f0..356c3e64b3d03b520a1bec5b5e0174e1d8ee23e8 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -1,6 +1,12 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +# The fully connected test is removed whe the WITH_MKLDNN flag is OFF +# Because the fully connected layer has only one kernel (MKLDNN) +if(NOT WITH_MKLDNN) + list(REMOVE_ITEM TEST_OPS test_fc_op) +endif(NOT WITH_MKLDNN) + if(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_recv_op) endif(NOT WITH_DISTRIBUTE) @@ -22,13 +28,16 @@ function(py_test_modules TARGET_NAME) set(multiValueArgs MODULES DEPS ARGS ENVS) cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_modules_ENVS} + COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_modules_ENVS} ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS} - 
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction() +list(REMOVE_ITEM TEST_OPS test_sequence_expand) + # test time consuming OPs in a separate process for expliot parallism +list(REMOVE_ITEM TEST_OPS test_parallel_executor) list(REMOVE_ITEM TEST_OPS test_warpctc_op) list(REMOVE_ITEM TEST_OPS test_dyn_rnn) list(REMOVE_ITEM TEST_OPS test_mul_op) @@ -63,7 +72,10 @@ else() endforeach(TEST_OP) endif(WITH_FAST_BUNDLE_TEST) +# +py_test_modules(test_sequence_expand MODULES test_sequence_expand) # tests with high overhead +py_test_modules(test_parallel_executor MODULES test_parallel_executor) py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn) py_test_modules(test_mul_op MODULES test_mul_op) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 8393f7827b1c7d361ebea72f2cfc6033268772f0..299ab8e51f017e1980a8b40e3830fc42b1ff7ccc 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -334,7 +334,7 @@ class OpTest(unittest.TestCase): np.allclose( actual_t, expect_t, atol=atol), "Output (" + out_name + ") has diff at " + str(place) + - str(actual_t) + str(expect_t)) + str(actual_t) + "\n" + str(expect_t)) if isinstance(expect, tuple): self.assertListEqual(actual.lod(), expect[1], "Output (" + out_name + @@ -568,6 +568,6 @@ class OpTest(unittest.TestCase): fetch_list = [g for p, g in param_grad_list] executor = Executor(place) - return map( - np.array, - executor.run(prog, feed_dict, fetch_list, return_numpy=False)) + return map(np.array, + executor.run(prog, feed_dict, fetch_list, + return_numpy=False)) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 4a2b35322dd4b9718c83eb5ee679ada382938441..57d4a50e913c0d2994c62600f4e479056ed4c306 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -22,193 +22,504 @@ from scipy.special import expit class TestExp(OpTest): def setUp(self): self.op_type = "exp" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': np.exp(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.exp(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Exp(TestExp): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSigmoid(OpTest): def setUp(self): self.op_type = "sigmoid" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = 1 / (1 + np.exp(-x)) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def 
test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Out', max_relative_error=0.008) + if self.dtype == np.float16: + return + self.check_grad(['X'], 'Out', max_relative_error=0.01) + + def init_dtype(self): + pass + + +class TestFP16Sigmoid(TestSigmoid): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) class TestLogSigmoid(OpTest): def setUp(self): self.op_type = "logsigmoid" - self.inputs = { - 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = np.log(1 / (1 + np.exp(-x))) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.008) + def init_dtype(self): + pass + + +class TestFP16LogSigmoid(TestLogSigmoid): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestTanh(OpTest): def setUp(self): self.op_type = "tanh" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': np.tanh(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.tanh(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Tanh(TestTanh): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestTanhShrink(OpTest): def setUp(self): self.op_type = "tanh_shrink" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32") - } - self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype) + out = x - np.tanh(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.008) + def init_dtype(self): + pass + + +class TestFP16TanhShrink(TestTanhShrink): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestHardShrink(OpTest): def setUp(self): self.op_type = "hard_shrink" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + threshold = 0.5 + x = np.random.uniform(-1, 1, [4, 
4]).astype(self.dtype) + out = np.copy(x) + out[(out >= -threshold) & (out <= threshold)] = 0 - self.inputs = {'X': x} self.attrs = {'lambda': threshold} - - t = np.copy(x) - t[(t >= -threshold) & (t <= threshold)] = 0 - self.outputs = {'Out': t} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.005) + def init_dtype(self): + pass + + +class TestFP16HardShrink(TestHardShrink): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSoftShrink(OpTest): def setUp(self): self.op_type = "softshrink" + self.dtype = np.float32 + self.init_dtype() + lambda_val = 0.1 + x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype) + out = np.copy(x) + out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * ( + out - lambda_val) + self.attrs = {'lambda': lambda_val} - self.inputs = { - 'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32") - } - y = np.copy(self.inputs['X']) - y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * ( - y - lambda_val) - self.outputs = {'Out': y} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16SoftShrink(TestSoftShrink): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSqrt(OpTest): def setUp(self): self.op_type = "sqrt" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': np.sqrt(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.sqrt(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Sqrt(TestSqrt): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestAbs(OpTest): def setUp(self): self.op_type = "abs" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) # Because we set delta = 0.005 in caculating numeric gradient, # if x is too small, such as 0.002, x_neg will be -0.003 # x_pos will be 0.007, so the numeric gradient is unaccurate. 
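Every activation test in this file is reworked along the same lines, so the pattern is worth stating once: the float32 test gains an init_dtype() hook, and a TestFP16* subclass overrides it, checks outputs only on a CUDA place that supports float16, and skips the numeric gradient check. A condensed sketch of that skeleton, using the exp op as the example and assuming the fluid unittest harness (op_test.OpTest) is importable:

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest


class TestExpLike(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': np.exp(x)}

    def init_dtype(self):
        pass  # float32 by default; the FP16 subclass overrides this hook

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return  # numeric gradients are not checked in half precision
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestFP16ExpLike(TestExpLike):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # Only verify outputs, and only on a CUDA device that supports FP16.
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


if __name__ == '__main__':
    unittest.main()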
# we should avoid this x[np.abs(x) < 0.005] = 0.02 - self.inputs = {'X': x} - self.outputs = {'Out': np.abs(self.inputs['X'])} + out = np.abs(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Abs(TestAbs): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestCeil(OpTest): def setUp(self): self.op_type = "ceil" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") - self.inputs = {'X': x} - self.outputs = {'Out': np.ceil(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) + out = np.ceil(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Ceil(TestCeil): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestFloor(OpTest): def setUp(self): self.op_type = "floor" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") - self.inputs = {'X': x} - self.outputs = {'Out': np.floor(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) + out = np.floor(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Floor(TestFloor): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + + +class TestCos(OpTest): + def setUp(self): + self.op_type = "cos" + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) + out = np.cos(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + if self.dtype == np.float16: + return + self.check_grad(['X'], 'Out', max_relative_error=0.007) + + def init_dtype(self): + pass + + +class TestFP16Cos(TestCos): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + + +class TestSin(OpTest): + def setUp(self): + self.op_type = "sin" + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) + out = np.sin(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} + + def 
test_check_output(self): + self.check_output() + + def test_check_grad(self): + if self.dtype == np.float16: + return + self.check_grad(['X'], 'Out', max_relative_error=0.007) + + def init_dtype(self): + pass + + +class TestFP16Sin(TestSin): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestRound(OpTest): def setUp(self): self.op_type = "round" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") - self.inputs = {'X': x} - self.outputs = {'Out': np.round(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) + out = np.round(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Round(TestRound): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestRelu(OpTest): def setUp(self): @@ -250,222 +561,463 @@ class TestFP16Relu(TestRelu): class TestBRelu(OpTest): def setUp(self): self.op_type = "brelu" - x = np.random.uniform(-1, 1, [4, 4]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) t_min = 1.0 t_max = 4.0 # The same with TestAbs x[np.abs(x - t_min) < 0.005] = t_min + 0.02 x[np.abs(x - t_max) < 0.005] = t_max + 0.02 - - self.inputs = {'X': x} - self.attrs = {'t_min': t_min, 't_max': t_max} t = np.copy(x) t[t < t_min] = t_min t[t > t_max] = t_max + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.attrs = {'t_min': t_min, 't_max': t_max} self.outputs = {'Out': t} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.02) + def init_dtype(self): + pass + + +class TestFP16BRelu(TestBRelu): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestRelu6(OpTest): def setUp(self): self.op_type = "relu6" - x = np.random.uniform(-1, 1, [4, 10]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype) threshold = 6.0 # The same with TestAbs x[np.abs(x) < 0.005] = 0.02 x[np.abs(x - threshold) < 0.005] = threshold + 0.02 + out = np.minimum(np.maximum(x, 0), threshold) - self.inputs = {'X': x} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'threshold': threshold} - self.outputs = { - 'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold) - } + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.02) + def init_dtype(self): + pass + + +class TestFP16Relu6(TestRelu6): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if 
core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSoftRelu(OpTest): def setUp(self): self.op_type = "soft_relu" - x = np.random.uniform(-3, 3, [4, 4]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) threshold = 2.0 # The same reason with TestAbs x[np.abs(x - threshold) < 0.005] = threshold + 0.02 x[np.abs(x + threshold) < 0.005] = -threshold + 0.02 - self.inputs = {'X': x} - self.attrs = {'threshold': threshold} t = np.copy(x) t[t < -threshold] = -threshold t[t > threshold] = threshold - self.outputs = {'Out': np.log((np.exp(t) + 1))} + out = np.log((np.exp(t) + 1)) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.attrs = {'threshold': threshold} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.02) + def init_dtype(self): + pass + + +class TestFP16SoftRelu(TestSoftRelu): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestELU(OpTest): def setUp(self): self.op_type = "elu" - x = np.random.uniform(-3, 3, [4, 4]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) alpha = 1. + out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1) # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here self.inputs = {'X': x} self.attrs = {'alpha': alpha} - self.outputs = { - 'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) - } + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.02) + def init_dtype(self): + pass + + +class TestFP16ELU(TestELU): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestReciprocal(OpTest): def setUp(self): self.op_type = "reciprocal" - self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")} - self.outputs = {'Out': np.reciprocal(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) + out = np.reciprocal(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.01) + def init_dtype(self): + pass + + +class TestFP16Reciprocal(TestReciprocal): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestLog(OpTest): def setUp(self): self.op_type = "log" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - 
self.outputs = {'Out': np.log(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.log(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Log(TestLog): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSquare(OpTest): def setUp(self): self.op_type = "square" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } - self.outputs = {'Out': np.square(self.inputs['X'])} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.square(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Square(TestSquare): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestPow(OpTest): def setUp(self): self.op_type = "pow" - self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")} + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) + out = np.power(x, 3) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'factor': 3.0} - self.outputs = {'Out': np.power(self.inputs['X'], 3)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.02) + def init_dtype(self): + pass + + +class TestFP16Pow(TestPow): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=5e-2) + class TestSTanh(OpTest): def setUp(self): self.op_type = "stanh" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") - } + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) scale_a = 2.0 / 3.0 scale_b = 1.7159 + out = scale_b * np.tanh(x * scale_a) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} - self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16STanh(TestSTanh): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + 
self.check_output_with_place(place, atol=1e-3) + class TestSoftplus(OpTest): def setUp(self): self.op_type = "softplus" - self.inputs = { - 'X': np.random.uniform(-1, 1, [11, 17]).astype("float64") - } - self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))} + self.dtype = np.float64 + self.init_dtype() + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = np.log(1 + np.exp(x)) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Softplus(TestSoftplus): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSoftsign(OpTest): def setUp(self): self.op_type = "softsign" - self.inputs = { - 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") - } - self.outputs = { - 'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X'])) - } + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = np.divide(x, 1 + np.abs(x)) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Softsign(TestSoftsign): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestThresholdedRelu(OpTest): def setUp(self): self.op_type = "thresholded_relu" + self.dtype = np.float32 + self.init_dtype() + threshold = 0.25 self.relative_error = 0.005 - X = np.random.uniform(-1, 1, [11, 17]).astype("float32") + X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) # Same reason as TestAbs X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2 + out = (X > threshold) * X - self.inputs = {'X': X} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} self.attrs = {'threshold': threshold} - self.outputs = {'Out': (X > threshold) * X} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=self.relative_error) + def init_dtype(self): + pass + + +class TestFP16ThresholdedRelu(TestThresholdedRelu): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestHardSigmoid(OpTest): def setUp(self): self.op_type = "hard_sigmoid" + self.dtype = np.float32 + self.init_dtype() + self.relative_error = 0.002 X = np.random.uniform(-5, 5, [2, 2]).astype("float32") @@ -474,7 +1026,6 @@ class TestHardSigmoid(OpTest): lower_threshold = -offset / slope upper_threshold = (1 - offset) / slope - self.inputs = {'X': X} # Same reason as TestAbs X[np.abs(X - lower_threshold) < self.relative_error] = \ lower_threshold + 0.2 @@ -482,34 +1033,103 @@ class 
TestHardSigmoid(OpTest): upper_threshold - 0.2 temp = X * slope + offset - self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))} + out = np.maximum(0.0, np.minimum(1.0, temp)) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.002) + def init_dtype(self): + pass + + +class TestFP16HardSigmoid(TestHardSigmoid): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestSwish(OpTest): def setUp(self): self.op_type = "swish" - X = np.random.uniform(0.1, 1, [11, 17]).astype("float32") - self.inputs = {'X': X} - self.attrs = {'beta': 2.3} - self.outputs = {'Out': X * expit(self.attrs['beta'] * X)} + self.dtype = np.float32 + self.init_dtype() + + X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + beta = 2.3 + out = X * expit(beta * X) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} + self.attrs = {'beta': beta} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.008) + def init_dtype(self): + pass + + +class TestFP16Swish(TestSwish): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + #--------------------test MKLDNN-------------------- -class TestMKLDNNRelu(TestRelu): +class TestMKLDNNReluDim2(TestRelu): + def setUp(self): + super(TestMKLDNNReluDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNTanhDim2(TestTanh): + def setUp(self): + super(TestMKLDNNTanhDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNSqrtDim2(TestSqrt): + def setUp(self): + super(TestMKLDNNSqrtDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNAbsDim2(TestAbs): + def setUp(self): + super(TestMKLDNNAbsDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNReluDim4(TestRelu): def setUp(self): - super(TestMKLDNNRelu, self).setUp() + super(TestMKLDNNReluDim4, self).setUp() x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") # The same reason with TestAbs @@ -521,9 +1141,9 @@ class TestMKLDNNRelu(TestRelu): self.attrs = {"use_mkldnn": True} -class TestMKLDNNTanh(TestTanh): +class TestMKLDNNTanhDim4(TestTanh): def setUp(self): - super(TestMKLDNNTanh, self).setUp() + super(TestMKLDNNTanhDim4, self).setUp() self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") @@ -532,9 +1152,9 @@ class TestMKLDNNTanh(TestTanh): self.attrs = {"use_mkldnn": True} -class TestMKLDNNSqrt(TestSqrt): +class TestMKLDNNSqrtDim4(TestSqrt): def setUp(self): - super(TestMKLDNNSqrt, self).setUp() + super(TestMKLDNNSqrtDim4, self).setUp() self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") @@ -543,9 +1163,9 @@ class TestMKLDNNSqrt(TestSqrt): self.attrs = {"use_mkldnn": True} -class TestMKLDNNAbs(TestAbs): +class TestMKLDNNAbsDim4(TestAbs): def setUp(self): - super(TestMKLDNNAbs, self).setUp() + super(TestMKLDNNAbsDim4, self).setUp() x = np.random.uniform(-1, 
1, [2, 4, 3, 5]).astype("float32") # The same reason with TestAbs diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 10aa63e18a6eeaa44e5b12f7532998dca2bc5e9f..7ecf9a1459ffc9740ae8c12df3902163ee689f59 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -14,23 +14,13 @@ import unittest import numpy as np -from op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator +import paddle.fluid as fluid +from op_test import OpTest from paddle.fluid.framework import grad_var_name -def get_backward_op(scope, op, no_grad_set): - backward_op = core.Operator.backward(op, no_grad_set) - for input in backward_op.input_vars(): - var = scope.var(input) - var.get_tensor() - for output in backward_op.output_vars(): - var = scope.var(output) - var.get_tensor() - return backward_op - - def _reference_testing(x, scale, offset, mean, var, epsilon, data_format): x_shape = x.shape if len(x_shape) == 2: @@ -64,11 +54,6 @@ def _reference_testing(x, scale, offset, mean, var, epsilon, data_format): def _reference_training(x, scale, offset, epsilon, data_format): x_shape = x.shape - if len(x_shape) == 2: - if data_format == "NCHW": - x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) - else: - x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) if data_format == "NCHW": n, c, h, w = x.shape @@ -88,8 +73,6 @@ def _reference_training(x, scale, offset, epsilon, data_format): offset_tile = np.reshape(offset, (1, c, 1, 1)) offset_tile = np.reshape(offset_tile, (1, c, 1, 1)) y = normalized * scale_tile + offset_tile - if len(x_shape) == 2: - y = np.reshape(y, (y.shape[0], y.shape[1])) return y, mean, var elif data_format == "NHWC": x_square = x * x @@ -100,59 +83,42 @@ def _reference_training(x, scale, offset, epsilon, data_format): var = x_square_sum / element_count - mean * mean normalized = (x - mean) / np.sqrt(var + epsilon) y = normalized * scale + offset - if len(x_shape) == 2: - y = np.reshape(y, x_shape) return y, mean, var else: raise ValueError("Unknown data order.") -def _reference_grad(x, grad_y, scale, mean, var, epsilon, data_format): +def _reference_grad(x, y_grad, scale, mean, var, epsilon, data_format): # Use the following formulas to calculate gradients: # grad_scale = # sum(grad_y * (x - mean)) * rsqrt(var + epsilon) # # grad_offset = sum(output_y) # - # grad_x = + # x_grad = # 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) - # (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon)) # transfer from (N, C, H, W) to (N, H, W, C) to simplify computation - x_shape = x.shape - - if len(x_shape) == 2: - if data_format == "NCHW": - x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) - grad_y = np.reshape(grad_y, - (grad_y.shape[0], grad_y.shape[1], 1, 1)) - else: - x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) - grad_y = np.reshape(grad_y, - (grad_y.shape[0], 1, 1, grad_y.shape[1])) - if data_format == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) - grad_y = np.transpose(grad_y, (0, 2, 3, 1)) + y_grad = np.transpose(y_grad, (0, 2, 3, 1)) - # raise ValueError("data_format must be NHWC, got %s." 
% data_format) - grad_x = scale * (grad_y - np.mean( - grad_y, axis=(0, 1, 2)) - (x - mean) * np.mean( - grad_y * (x - mean), axis=(0, 1, 2)) / + x_grad = scale * (y_grad - np.mean( + y_grad, axis=(0, 1, 2)) - (x - mean) * np.mean( + y_grad * (x - mean), axis=(0, 1, 2)) / (var + epsilon)) / np.sqrt(var + epsilon) - grad_scale = np.sum(grad_y * (x - mean) / np.sqrt(var + epsilon), + grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2)) - grad_offset = np.sum(grad_y, axis=(0, 1, 2)) + grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W if data_format == "NCHW": - grad_x = np.transpose(grad_x, (0, 3, 1, 2)) + x_grad = np.transpose(x_grad, (0, 3, 1, 2)) x = np.transpose(x, (0, 3, 1, 2)) - grad_y = np.transpose(grad_y, (0, 3, 1, 2)) + y_grad = np.transpose(y_grad, (0, 3, 1, 2)) - if len(x_shape) == 2: - grad_x = np.reshape(grad_x, x_shape) - return grad_x, grad_scale, grad_offset + return x_grad, grad_scale, grad_offset def create_or_get_tensor(scope, var_name, var, place): @@ -186,7 +152,7 @@ def set_output_grad(scope, outputs, place, feed_dict=None): __set_tensor__(output, data) -class TestBatchNormOpInference(OpTest): +class TestBatchNormOpInference(unittest.TestCase): def setUp(self): self.dtype = np.float32 @@ -304,231 +270,121 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): self.check_with_place(place, data_format, self.dtype, [2, 3]) -class TestBatchNormOpTraining(OpTest): +class TestBatchNormOpTraining(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): + if not np.allclose(np.array(tensor), np_array, atol=atol): + import pdb + pdb.set_trace() self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def test_python_testing(self): - data_format = "NHWC" - epsilon = 0.00001 - - n, h, w, c = 2, 3, 4, 5 - x_shape = [n, h, w, c] - scale_shape = [c] - - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - - mean = np.zeros(scale_shape).astype(np.float32) - variance = np.ones(scale_shape).astype(np.float32) - - y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, - epsilon, "NHWC") - - # running N, C, H, W case - # should produce the same results - x_shape2 = [n, c, h, w] - x_val2 = np.transpose(x_val, (0, 3, 1, 2)) - y_out2 = _reference_testing(x_val2, scale_val, bias_val, mean, variance, - epsilon, "NCHW") - - # transfer (N, C, H, W) back to (N, H, W, C) - y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) - self.__assert_close(y_out, y_out2_trans, "inference output") - print 'python: NHWC, NCHW, inference checking passed' - - def test_python_training(self): - data_format = "NHWC" - epsilon = 0.00001 - momentum = 0.9 - - # N, H, W, C: 2, 3, 4, 2 - n, h, w, c = 2, 3, 4, 5 - x_shape = [n, h, w, c] - scale_shape = [c] - - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - - mean = np.zeros(scale_shape).astype(np.float32) - variance = np.ones(scale_shape).astype(np.float32) - - # run forward - y_out, saved_mean, var_ref = _reference_training( - x_val, scale_val, bias_val, epsilon, "NHWC") - - # - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. 
/ np.sqrt(var_ref + epsilon) - - # running N, C, H, W case - # should produce the same results - x_shape2 = [n, c, h, w] - x_val2 = np.transpose(x_val, (0, 3, 1, 2)) - y_out2, saved_mean2, var_ref2 = _reference_training( - x_val2, scale_val, bias_val, epsilon, "NCHW") - - self.__assert_close(saved_mean, saved_mean2, "batch mean") - self.__assert_close(var_ref, var_ref2, "batch variance") - - # transfer (N, C, H, W) back to (N, H, W, C) - y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) - self.__assert_close(y_out, y_out2_trans, "batch output") - print 'python: NHWC, NCHW, forward checking passed' - - # test backward now - # NHWC - self.y_grad = np.random.random_sample(x_shape).astype(np.float32) - y_grad = self.y_grad - # y_grad = np.ones(x_shape).astype(np.float32) - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, "NHWC") - - # NCHW - y_grad2 = np.transpose(y_grad, (0, 3, 1, 2)) - # y_grad2 = np.ones(x_shape2).astype(np.float32) - x_grad_ref2, scale_grad_ref2, bias_grad_ref2 = _reference_grad( - x_val2, y_grad2, scale_val, saved_mean2, var_ref2, epsilon, "NCHW") - - self.__assert_close(scale_grad_ref, scale_grad_ref2, "scale gradient") - self.__assert_close(bias_grad_ref, bias_grad_ref2, "bias gradient") - - x_grad_transpose = np.transpose(x_grad_ref2, (0, 2, 3, 1)) - self.__assert_close(x_grad_ref, x_grad_transpose, "x gradient") - print 'python: NHWC, NCHW, backward checking passed' - def test_forward_backward(self): def test_with_place(place, data_layout, shape): # attr epsilon = 0.00001 momentum = 0.9 - - if len(shape) == 2: - x_shape = shape - c = shape[1] + if data_layout == "NCHW": + n, c, h, w = shape[0], shape[1], shape[2], shape[3] else: - # n, h, w, c = 2, 3, 4, 2 n, h, w, c = shape[0], shape[1], shape[2], shape[3] - if data_format == "NHWC": - x_shape = [n, h, w, c] - elif data_format == "NCHW": - x_shape = [n, c, h, w] - else: - raise ValueError("Unknown data type.") scale_shape = [c] - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - + np.random.seed(123) + x = np.random.random_sample(shape).astype(np.float32) + scale = np.random.random_sample(scale_shape).astype(np.float32) + bias = np.random.random_sample(scale_shape).astype(np.float32) mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) # run forward - y_out, saved_mean, var_ref = _reference_training( - x_val, scale_val, bias_val, epsilon, data_format) - - # update moving mean and variance + y, saved_mean, var_ref = _reference_training(x, scale, bias, + epsilon, data_layout) mean_out = saved_mean * (1. - momentum) + momentum * mean variance_out = var_ref * (1. - momentum) + momentum * variance saved_variance = 1. / np.sqrt(var_ref + epsilon) - - # for gradient test - # y_grad = np.ones(x_shape).astype(np.float32) - y_grad = np.zeros(x_shape).astype(np.float32) - if len(y_grad.shape) == 2: - y_grad[0, 0] = 1. - else: - y_grad[0, 0, 0, 0] = 1. 
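An editor's note on the reference bookkeeping used by test_with_place above: the running statistics follow an exponential moving average with the given momentum, and the tensor named saved_variance actually holds the inverse standard deviation. A tiny numpy illustration with made-up per-channel values:

import numpy as np

momentum, epsilon = 0.9, 1e-5
mean, variance = np.zeros(3), np.ones(3)    # running stats before this batch
saved_mean = np.array([0.1, 0.2, 0.3])      # per-channel batch mean
var_ref = np.array([0.5, 0.6, 0.7])         # per-channel batch variance

mean_out = saved_mean * (1. - momentum) + momentum * mean
variance_out = var_ref * (1. - momentum) + momentum * variance
saved_variance = 1. / np.sqrt(var_ref + epsilon)   # inverse std, despite the name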
- # y_grad = np.random.random_sample(x_shape).astype(np.float32) - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, - data_format) - - scope = core.Scope() - - # create input - x_tensor = create_or_get_tensor(scope, "x_val", x_val, place) - scale_tensor = create_or_get_tensor(scope, "scale_val", scale_val, - place) - bias_tensor = create_or_get_tensor(scope, "bias_val", bias_val, - place) - mean_tensor = create_or_get_tensor(scope, "mean", mean, place) - variance_tensor = create_or_get_tensor(scope, "variance", variance, - place) - - # create output - y_tensor = create_or_get_tensor(scope, "y_out", None, place) - saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None, - place) - saved_variance_tensor = create_or_get_tensor( - scope, "saved_variance", None, place) - mean_out_tensor = mean_tensor - variance_out_tensor = variance_tensor - - batch_norm_op = Operator( - "batch_norm", - # inputs - X="x_val", - Scale="scale_val", - Bias="bias_val", - Mean="mean", - Variance="variance", - # outputs - Y="y_out", - MeanOut="mean", - VarianceOut="variance", - SavedMean="saved_mean", - SavedVariance="saved_variance", - # attrs - is_test=False, - data_layout=data_layout, - momentum=momentum, - epsilon=epsilon) - - batch_norm_op.run(scope, place) - - # check forward result - self.__assert_close(y_tensor, y_out, "y_out") - self.__assert_close(saved_mean_tensor, saved_mean, "saved_mean") - self.__assert_close(saved_variance_tensor, saved_variance, - "saved_variance") - self.__assert_close(mean_out_tensor, mean_out, "mean_out") - if isinstance(place, core.CUDAPlace): - atol = 5e-2 - else: - atol = 1e-4 - self.__assert_close(variance_out_tensor, variance_out, - "variance_out", atol) - print "op test forward passed: ", str(place), data_layout - # run backward - batch_norm_op_grad = get_backward_op(scope, batch_norm_op, set()) - set_output_grad( - scope, - ["y_out", "mean", "variance", "saved_mean", "saved_variance"], - place, - feed_dict={"y_out": y_grad}) - batch_norm_op_grad.run(scope, place) - - x_grad_tensor = create_or_get_tensor(scope, - grad_var_name("x_val"), None, - place) - scale_grad_tensor = create_or_get_tensor(scope, - grad_var_name("scale_val"), - None, place) - bias_grad_tensor = create_or_get_tensor(scope, - grad_var_name("bias_val"), - None, place) + y_grad = np.random.random_sample(shape).astype(np.float32) + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_format) + + var_dict = locals() + var_dict['y@GRAD'] = y_grad + + var_names = [ + 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean', + 'saved_variance' + ] + ground_truth = {name: var_dict[name] for name in var_names} + + program = fluid.Program() + with fluid.program_guard(program): + block = program.global_block() + for name in ground_truth: + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape) + bn_op = block.append_op( + type="batch_norm", + inputs={ + "X": block.var('x'), + "Scale": block.var('scale'), + "Bias": block.var('bias'), + "Mean": block.var('mean'), + "Variance": block.var('variance') + }, + outputs={ + "Y": block.var('y'), + "MeanOut": block.var('mean'), # share the same memory + "VarianceOut": + block.var('variance'), # share the same memory + "SavedMean": block.var('saved_mean'), + "SavedVariance": block.var('saved_variance') + }, + attrs={ + "momentum": momentum, + "epsilon": epsilon, + "is_test": False, + "data_layout": data_layout + }) + 
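+            # What follows wires the backward pass by hand: given the forward
+            # op's desc, core.get_grad_op_desc produces the grad op descs; the
+            # first one is copied into the block, its output gradient
+            # variables are declared, and infer_var_type/infer_shape fill in
+            # their dtype and shape before the program is executed.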
block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) + + # generate backward op_desc + grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( + bn_op.desc, set(), []) + grad_op_desc = grad_op_desc_list[0] + new_op_desc = block.desc.append_op() + new_op_desc.copy_from(grad_op_desc) + for var_name in grad_op_desc.output_arg_names(): + block.desc.var(var_name.encode("ascii")) + grad_op_desc.infer_var_type(block.desc) + grad_op_desc.infer_shape(block.desc) + for arg in grad_op_desc.output_arg_names(): + grad_var = block.desc.find_var(arg.encode("ascii")) + grad_var.set_dtype(core.VarDesc.VarType.FP32) + + exe = fluid.Executor(place) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in + ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD'] + }, + fetch_list=[ + 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', + 'x@GRAD', 'scale@GRAD', 'bias@GRAD' + ]) + + self.__assert_close(y, out[0], "y") + self.__assert_close(mean_out, out[1], "mean") + self.__assert_close(variance_out, out[2], "variance", 1e-3) + self.__assert_close(saved_mean, out[3], "saved_mean") + self.__assert_close(saved_variance, out[4], "saved_variance", 1e-3) + self.__assert_close(x_grad, out[5], "x_grad") + self.__assert_close(scale_grad, out[6], "scale_grad") + self.__assert_close(bias_grad, out[7], "bias_grad") - # check gradient output - self.__assert_close(x_grad_tensor, x_grad_ref, "x_grad") - self.__assert_close(scale_grad_tensor, scale_grad_ref, "scale_grad") - self.__assert_close(bias_grad_tensor, bias_grad_ref, "bias_grad") - print "op test backward passed: ", str(place), data_layout + print "op test forward passed: ", str(place), data_layout places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"): @@ -537,7 +393,6 @@ class TestBatchNormOpTraining(OpTest): for place in places: for data_format in ["NCHW", "NHWC"]: test_with_place(place, data_format, [2, 3, 4, 5]) - test_with_place(place, data_format, [2, 3]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cond_op.py b/python/paddle/fluid/tests/unittests/test_cond_op.py deleted file mode 100644 index 66fbae961a2701e79da5222ae2689108335c4065..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_cond_op.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import paddle.fluid.core as core -import unittest -import numpy as np -from paddle.fluid.op import Operator, CondOp - - -class PySimpleCond(object): - ''' - A simple implementation of dynamic if-else based on numpy - ''' - - def __init__(self): - array = [1] * 10 - for i in range(1, 10, 2): - array[i] = 0 - self.cond = np.array(array) - self.x = np.ones(shape=(10, 1)).astype("float32") - - def forward(self): - self.index_t = np.where(self.cond == 1) - self.index_f = np.where(self.cond == 0) - y_t = self.x[self.index_t] - y_f = self.x[self.index_f] - y_t = y_t * 2. 
- y_f = y_f * (-2.) - output = np.zeros(shape=(10, 1)) - output[self.index_t] = y_t - output[self.index_f] = y_f - return output - - -class PySimpleCondTest(unittest.TestCase): - def setUp(self): - self.condnn = PySimpleCond() - - def test_forward(self): - output = self.condnn.forward() - - -def create_tensor(scope, name, shape, np_data): - tensor = scope.var(name).get_tensor() - tensor.set_dims(shape) - tensor.set(np_data, core.CPUPlace()) - return tensor - - -class TestCondOp(unittest.TestCase): - ''' - Test CondOp - - equation: - cond = [True, False, True, False, ...] - y[index_t] = x[index_t] * 2. - y[index_f] = x[index_f] * -2. - outputs: - y - ''' - - def setUp(self): - self.py_cond = PySimpleCond() - - def forward(self): - self.scope = core.Scope() - self.create_global_variables() - self.create_cond_op() - self.create_sub_net() - self.condop.run(self.scope, core.CPUPlace()) - return np.array(self.scope.find_var("Out").get_tensor()) - - def create_global_variables(self): - x_np_data = self.py_cond.x - create_tensor(self.scope, "X", [10, 1], x_np_data) - cond_np_data = self.py_cond.cond.astype("int32") - create_tensor(self.scope, "cond", [10, 1], cond_np_data) - self.scope.var("SubScopes") - self.scope.var("IndexTensors") - self.scope.var("Out") - - def create_cond_op(self): - self.condop = CondOp( - Cond="cond", - Xs=["X"], - Outs=["Out"], - SubScopes="SubScopes", - IndexTensors="IndexTensors") - - def create_sub_net(self): - truenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Out='Out', scale=2.) - truenet.append_op(scale_op_t) - truenet.complete_add_op(True) - self.condop.set_truenet(truenet) - - falsenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Out='Out', scale=-2.) - falsenet.append_op(scale_op_t) - falsenet.complete_add_op(True) - self.condop.set_falsenet(falsenet) - - def test_forward(self): - print 'test cond op forward' - pd_output = self.forward() - py_output = self.py_cond.forward() - print 'pd_output', pd_output - print - print 'py_output', py_output - self.assertEqual(pd_output.shape, py_output.shape) - print 'test passed' - return 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 4b6e3fb69a12095c77f343515fe3b6d1f3fccb14..65606a0b4373b28036096cf046da5143a3b8bcd0 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -97,8 +97,11 @@ class TestConv2dOp(OpTest): } self.outputs = {'Output': output} + def testcudnn(self): + return core.is_compiled_with_cuda() and self.use_cudnn + def test_check_output(self): - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_output_with_place(place, atol=1e-5) else: @@ -107,7 +110,7 @@ class TestConv2dOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, @@ -121,7 +124,7 @@ class TestConv2dOp(OpTest): def test_check_grad_no_filter(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, ['Input'], @@ -138,7 +141,7 @@ class TestConv2dOp(OpTest): def test_check_grad_no_input(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, ['Filter'], diff --git 
a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py index 2b7bbf9218f9b8fd8f5b29ac3cbc2f9680f471eb..67b03f635b6f8a3003efabe5425325080d47f61c 100644 --- a/python/paddle/fluid/tests/unittests/test_debugger.py +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -51,7 +51,9 @@ class TestDebugger(unittest.TestCase): outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) - print(debuger.pprint_program_codes(p.desc)) + print(debuger.pprint_program_codes(p)) + + debuger.draw_block_graphviz(p.block(0), path="./test.dot") if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index df7ab0d29bdfc9410cd7dd4a8f2a7cd440ef4aba..0faed94deb4808783027d776e0f4c61da0db457a 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle import unittest import numpy diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index b03a70f1b9e61162d37541ffeba8510fc11c605a..d3f63ee2c414a71309be8f0af6d3e5912078ecdb 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import paddle.v2 as paddle +import paddle import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid.backward import append_backward diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py new file mode 100644 index 0000000000000000000000000000000000000000..3f547f3c484bf034a87823a75d946ef130a5cb70 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -0,0 +1,99 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
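The new MKLDNN FC test defined below compares against a naive NumPy reference. As a quick shape walk-through of that reference (using the sizes from MatrixGenerate(1, 10, 15, 3, 3), and ignoring the extra reshape/transpose the helper applies to the weights, apparently to match the expected weight layout):

import numpy as np

mb, ic, oc, h, w = 1, 10, 15, 3, 3   # sizes from MatrixGenerate(1, 10, 15, 3, 3)
x = np.random.random((mb, ic, h, w)).astype('float32')
weights = np.random.random((ic * h * w, oc)).astype('float32')
out = np.dot(x.reshape(mb, ic * h * w), weights)   # flatten NCHW, then matmul
assert out.shape == (mb, oc)                       # (1, 15)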
+ +import unittest +import numpy as np +from op_test import OpTest + + +def fully_connected_naive(input, weights, bias_data=None): + in_n, in_c, in_h, in_w = input.shape + w_h, w_c = weights.shape + + x_data = np.reshape(input, [in_n, in_c * in_h * in_w]) + w_data = np.transpose(np.reshape(weights, (w_c, in_c * in_h * in_w))) + result = None + + if not bias_data: + result = np.dot(x_data, w_data) + else: + result = np.dot(x_data, w_data) + bias_data + + return result + + +class MatrixGenerate: + def __init__(self, mb, ic, oc, h, w): + self.input = np.random.random((mb, ic, h, w)).astype("float32") + self.weights = np.random.random((ic * h * w, oc)).astype("float32") + + +class TestFCMKLDNNOp(OpTest): + def setUp(self): + self.op_type = "fc" + self.use_mkldnn = True + self.with_bias = True + self.matrix = MatrixGenerate(1, 10, 15, 3, 3) + + self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights} + + self.attrs = { + 'use_mkldnn': self.use_mkldnn, + 'with_bias': self.with_bias + } + + self.outputs = { + 'Out': fully_connected_naive(self.matrix.input, self.matrix.weights) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(set(['Input', 'W']), 'Out', max_relative_error=0.9) + + def test_check_grad_no_weight(self): + self.check_grad( + ['Input'], 'Out', max_relative_error=0.5, no_grad_set=set('W')) + + +class TestFCMKLDNNOp1(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 15, 48, 2, 2) + + +class TestFCMKLDNNOp2(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 32, 40, 1, 1) + + +class TestFCMKLDNNOp3(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 2, 4, 1, 1) + + +class TestFCMKLDNNOp4(TestFCMKLDNNOp): + def init_op_type(self): + self.with_bias = False + self.matrix = MatrixGenerate(2, 32, 48, 2, 2) + + +class TestFCMKLDNNOp4(TestFCMKLDNNOp): + def init_op_type(self): + self.with_bias = False + self.matrix = MatrixGenerate(2, 32, 1000, 6, 6) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 8c67e45b7fc997012af5f678f21271ad8b220edc..69365db4d104a1b69916a605534eff83e242289f 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -15,10 +15,8 @@ import unittest import numpy as np from operator import mul -from op_test import OpTest import paddle.fluid.core as core -from paddle.fluid.op import Operator -from paddle.fluid.framework import grad_var_name +import paddle.fluid as fluid np.random.random(123) @@ -70,161 +68,93 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): return grad_x, d_scale, d_bias -def get_backward_op(scope, op, no_grad_set): - backward_op = core.Operator.backward(op, no_grad_set) - for input in backward_op.input_vars(): - var = scope.var(input) - var.get_tensor() - for output in backward_op.output_vars(): - var = scope.var(output) - var.get_tensor() - return backward_op - - -def create_or_get_tensor(scope, var_name, var, place): - tensor = scope.var(var_name).get_tensor() - if var is not None: - assert isinstance(var, np.ndarray) - tensor.set_lod([[]]) - tensor.set_dims(var.shape) - tensor.set(var, place) - return tensor - - -def set_output_grad(scope, outputs, place, feed_dict=None): - def __set_tensor__(name, data=None): - out_tensor = scope.find_var(name).get_tensor() 
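For the layer-norm checks in this file, here is a simplified, standalone NumPy rewrite of the forward reference the test compares against: the trailing dims from begin_norm_axis onward are flattened to D, statistics are taken per row of the resulting (N, D) matrix, and scale/bias broadcast over rows. The function name and the flatten-based formulation are illustrative.

import numpy as np

def layer_norm_forward(x, scale, bias, eps, begin_norm_axis=1):
    shape = x.shape
    N = int(np.prod(shape[:begin_norm_axis]))
    D = int(np.prod(shape[begin_norm_axis:]))
    x2 = x.reshape(N, D)
    mean = x2.mean(axis=1)
    var = x2.var(axis=1)
    y = scale.reshape(1, D) * (x2 - mean.reshape(N, 1)) / \
        np.sqrt(var.reshape(N, 1) + eps) + bias.reshape(1, D)
    return y.reshape(shape), mean, var

np.random.seed(123)
x = np.random.random_sample((2, 3, 4, 5)).astype(np.float32)
scale = np.random.random_sample(60).astype(np.float32)  # D = 3 * 4 * 5
bias = np.random.random_sample(60).astype(np.float32)
y, mean, var = layer_norm_forward(x, scale, bias, 1e-5)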
- grad_tensor = scope.var(grad_var_name(name)).get_tensor() - out_dtype = out_tensor.dtype() - if data is None: - if out_dtype == core.VarDesc.VarType.FP64: - data = np.ones(out_tensor.shape(), dtype=np.float64) - elif out_dtype == core.VarDesc.VarType.FP32: - data = np.ones(out_tensor.shape(), dtype=np.float32) - else: - raise ValueError("Not supported data type " + str(out_dtype)) - grad_tensor.set(data, place) - - for output in outputs: - data = None - if output in feed_dict: - data = feed_dict[output] - __set_tensor__(output, data) - - -class TestLayerNormdOp(OpTest): +class TestLayerNormdOp(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def __assert_grad_close(self, - tensor, - np_array, - name, - place, - max_relative_error=0.02): - a = np.array(tensor) - b = np_array - abs_a = np.abs(a) - abs_a[abs_a < 1e-5] = 1 - - diff_mat = np.abs(a - b) / abs_a - max_diff = np.max(diff_mat) - - def err_msg(): - offset = np.argmax(diff_mat > max_relative_error) - return ("%s Variable %s max gradient diff %f over limit %f, " - "the first error element is %d, %f, %f") % ( - "Gradient Check On %s" % str(place), name, max_diff, - max_relative_error, offset, a.flatten()[offset], - b.flatten()[offset]) - - self.assertLessEqual(max_diff, max_relative_error, err_msg()) - def check_forward_backward(self, shape, begin_norm_axis): - def test_with_place(place, shape, begin_norm_axis=1): - # setUp - assert begin_norm_axis > 0 and begin_norm_axis < len( - shape), 'begin_norm_axis must be between 0 and len(shape)-1.' + def test_with_place(place, shape, begin_norm_axis): # attr epsilon = 0.00001 x_shape = shape D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) scale_shape = [D] - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) + np.random.seed(123) + x = np.random.random_sample(x_shape).astype(np.float32) + scale = np.random.random_sample(scale_shape).astype(np.float32) + bias = np.random.random_sample(scale_shape).astype(np.float32) y_grad = np.random.random_sample(x_shape).astype(np.float32) - # run forward - y_out, saved_mean, var_ref = _reference_layer_norm_naive( - x_val, scale_val, bias_val, epsilon, begin_norm_axis) - naive_fw = {"Y": y_out, "Mean": saved_mean, "Variance": var_ref} - - # get gradient - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, begin_norm_axis) - naive_grad = { - "X": x_grad_ref, - "Scale": scale_grad_ref, - "Bias": bias_grad_ref - } - - scope = core.Scope() - - # create input - input_map = {"X": x_val, "Scale": scale_val, "Bias": bias_val} - for i_name in input_map: - create_or_get_tensor(scope, i_name, input_map[i_name], place) - - # create output - output_map = {"Y": None, "Mean": None, "Variance": None} - output_tensor = {} - for o_name in output_map: - output_tensor[o_name] = create_or_get_tensor( - scope, o_name, output_map[o_name], place) - - layer_norm_op = Operator( - "layer_norm", - # inputs - X="X", - Scale="Scale", - Bias="Bias", - # outputs - Y="Y", - Mean="Mean", - Variance="Variance", - # attrs - epsilon=epsilon, - begin_norm_axis=begin_norm_axis) - - layer_norm_op.run(scope, place) - - # check forward result - atol = 5e-2 if isinstance(place, core.CUDAPlace) else 1e-4 - for o_tensor in output_tensor: - 
self.__assert_close(output_tensor[o_tensor], naive_fw[o_tensor], - o_tensor, atol) - - # run backward - layer_norm_op_grad = get_backward_op(scope, layer_norm_op, set()) - set_output_grad( - scope, ["Y", "Mean", "Variance"], - place, - feed_dict={"Y": y_grad}) - layer_norm_op_grad.run(scope, place) - - # get output - grad_tensor = {} - for o_name in naive_grad: - grad_tensor[o_name] = x_ = create_or_get_tensor( - scope, grad_var_name(o_name), None, place) - - # check gradient output - for o_grad in naive_grad: - self.__assert_grad_close(grad_tensor[o_grad], - naive_grad[o_grad], o_grad + "@GRAD", - place) + # reference forward & backward + y, mean, variance = _reference_layer_norm_naive( + x, scale, bias, epsilon, begin_norm_axis) + x_grad, scale_grad, bias_grad = _reference_layer_norm_grad( + x, y_grad, scale, mean, variance, begin_norm_axis) + + var_dict = locals() + var_dict['y@GRAD'] = y_grad + var_names = [ + 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'y@GRAD' + ] + ground_truth = {name: var_dict[name] for name in var_names} + + program = fluid.Program() + with fluid.program_guard(program): + block = program.global_block() + for name in ground_truth: + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape) + layer_norm_op = block.append_op( + type="layer_norm", + inputs={ + "X": block.var('x'), + "Scale": block.var('scale'), + "Bias": block.var('bias'), + }, + outputs={ + "Y": block.var('y'), + "Mean": block.var('mean'), # share the same memory + "Variance": + block.var('variance'), # share the same memory + }, + attrs={ + "epsilon": epsilon, + "begin_norm_axis": begin_norm_axis + }) + + # generate backward op_desc + grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( + layer_norm_op.desc, set(), []) + grad_op_desc = grad_op_desc_list[0] + new_op_desc = block.desc.append_op() + new_op_desc.copy_from(grad_op_desc) + for var_name in grad_op_desc.output_arg_names(): + block.desc.var(var_name.encode("ascii")) + grad_op_desc.infer_var_type(block.desc) + grad_op_desc.infer_shape(block.desc) + for arg in grad_op_desc.output_arg_names(): + grad_var = block.desc.find_var(arg.encode("ascii")) + grad_var.set_dtype(core.VarDesc.VarType.FP32) + + exe = fluid.Executor(place) + out = exe.run(program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=[ + 'y', 'mean', 'variance', 'x@GRAD', + 'scale@GRAD', 'bias@GRAD' + ]) + self.__assert_close(y, out[0], "y") + self.__assert_close(mean, out[1], "mean") + self.__assert_close(variance, out[2], "variance", 1e-3) + self.__assert_close(x_grad, out[3], "x_grad") + self.__assert_close(scale_grad, out[4], "scale_grad", 1e-3) + self.__assert_close(bias_grad, out[5], "bias_grad") places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): @@ -237,15 +167,6 @@ class TestLayerNormdOp(OpTest): self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) - def test_check_forward_backward_with_scale(self): - pass # TODO(zcd) - - def test_check_forward_backward_with_bias(self): - pass # TODO(zcd) - - def test_check_forward_backward(self): - pass # TODO(zcd) - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 2179826d81f715d6d280aea28a76f919330dd644..a1be2d671ddc5c689b16319fcf5bf12dca5dde7e 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ 
b/python/paddle/fluid/tests/unittests/test_layers.py @@ -32,7 +32,6 @@ class TestBook(unittest.TestCase): cost = layers.square_error_cost(input=y_predict, label=y) avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) - program.append_backward(avg_cost) print(str(program)) @@ -94,8 +93,6 @@ class TestBook(unittest.TestCase): cost = layers.cross_entropy(input=predict, label=label) avg_cost = layers.mean(cost) - program.append_backward(avg_cost) - print(str(program)) def test_word_embedding(self): @@ -343,6 +340,16 @@ class TestBook(unittest.TestCase): print(layers.lod_reset(x=x, y=y)) print(str(program)) + def test_label_smooth(self): + program = Program() + with program_guard(program): + label = layers.data(name="label", shape=[1], dtype="float32") + one_hot_label = layers.one_hot(input=label, depth=10) + smooth_label = layers.label_smooth( + label=one_hot_label, epsilon=0.1, dtype="float32") + self.assertIsNotNone(smooth_label) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index ed920ad388ff0e01887404e70fe82565b4cd28fa..f8d5785fbfe64843f4aa3b96b24809df60980c74 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -96,5 +96,47 @@ class TestLookupTableIdsIsSelectedRows(OpTest): self.check_with_place(place) +class TestLookupTableWIsSelectedRows(OpTest): + def check_with_place(self, place): + scope = core.Scope() + + # create and initialize Id Variable + ids_tensor = scope.var('Ids').get_tensor() + ids_array = np.array([[0], [4], [3], [5]]).astype("int64") + ids_tensor.set(ids_array, place) + + # create and initialize W Variable + rows = [0, 1, 2, 3, 4, 5, 6] + row_numel = 12 + + w_selected_rows = scope.var('W').get_selected_rows() + w_selected_rows.set_height(len(rows)) + w_selected_rows.set_rows(rows) + w_array = np.ones((len(rows), row_numel)).astype("float32") + for i in range(len(rows)): + w_array[i] *= i + w_tensor = w_selected_rows.get_tensor() + w_tensor.set(w_array, place) + + # create Out Variable + out_tensor = scope.var('Out').get_tensor() + + # create and run lookup_table operator + lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out') + lookup_table.run(scope, place) + + # get result from Out + result_array = np.array(out_tensor) + # all(): return True if all elements of the iterable are true (or if the iterable is empty) + for idx, row in enumerate(ids_array): + assert (row[0] == result_array[idx]).all() + + def test_w_is_selected_rows(self): + places = [core.CPUPlace()] + # currently only support CPU + for place in places: + self.check_with_place(place) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py old mode 100755 new mode 100644 diff --git a/python/paddle/fluid/tests/unittests/test_multiple_reader.py b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py similarity index 90% rename from python/paddle/fluid/tests/unittests/test_multiple_reader.py rename to python/paddle/fluid/tests/unittests/test_multi_file_reader.py index 69f8acf81efaba8fc0f3df4cfe3a42dc4e477df2..5dc41e54d6158787eb966333c894e378b5c706d0 100644 --- a/python/paddle/fluid/tests/unittests/test_multiple_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py @@ -15,8 +15,8 @@ 
import unittest import paddle.fluid as fluid -import paddle.v2 as paddle -import paddle.v2.dataset.mnist as mnist +import paddle +import paddle.dataset.mnist as mnist from shutil import copyfile @@ -61,8 +61,12 @@ class TestMultipleReader(unittest.TestCase): exe.run(fluid.default_startup_program()) batch_count = 0 - while not data_files.eof(): - img_val, = exe.run(fetch_list=[img]) + while True: + try: + img_val, = exe.run(fetch_list=[img]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) data_files.reset() diff --git a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py index 8add353303e3626bbce68199a100306d4858766a..1471843ded7a42432a84a9fad76bb97dcf7fb9c2 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py @@ -15,8 +15,8 @@ import unittest import paddle.fluid as fluid -import paddle.v2 as paddle -import paddle.v2.dataset.mnist as mnist +import paddle +import paddle.dataset.mnist as mnist class TestMultipleReader(unittest.TestCase): @@ -44,7 +44,7 @@ class TestMultipleReader(unittest.TestCase): shapes=[(-1, 784), (-1, 1)], lod_levels=[0, 0], dtypes=['float32', 'int64']) - data_file = fluid.layers.create_multi_pass_reader( + data_file = fluid.layers.io.multi_pass( reader=data_file, pass_num=self.pass_num) img, label = fluid.layers.read_file(data_file) @@ -57,8 +57,12 @@ class TestMultipleReader(unittest.TestCase): exe.run(fluid.default_startup_program()) batch_count = 0 - while not data_file.eof(): - img_val, = exe.run(fetch_list=[img]) + while True: + try: + img_val, = exe.run(fetch_list=[img]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) data_file.reset() diff --git a/python/paddle/fluid/tests/unittests/test_net.py b/python/paddle/fluid/tests/unittests/test_net.py deleted file mode 100644 index ae1699d647d7c0adab36200fb07bde12085053c1..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_net.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
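The reader tests above replace eof() polling with exception-driven termination. A condensed sketch of that loop, assuming exe and a fetch variable are already wired to a file reader as in the tests:

import paddle.fluid as fluid

def drain_reader(exe, fetch_var):
    """Run until the file reader is exhausted; return the batch count."""
    batches = 0
    while True:
        try:
            exe.run(fetch_list=[fetch_var])
        except fluid.core.EnforceNotMet as ex:
            # raised by the reader op once the underlying queue is empty
            assert "There is no next data." in ex.message
            break
        batches += 1
    return batches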
- -import paddle.fluid.core as core -from paddle.fluid.op import Operator -import unittest - - -def fc(X, W, Y): - ret_v = core.Net.create() - - ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation")) - ret_v.append_op(Operator("sigmoid", X="pre_activation", Out=Y)) - ret_v.complete_add_op(True) - return ret_v - - -class TestNet(unittest.TestCase): - def test_net_all(self): - net = core.Net.create() - op1 = Operator("sum", X=["X", "Y"], Out="Out") - net.append_op(op1) - - net2 = core.Net.create() - net2.append_op(fc(X="X", W="w", Y="fc.out")) - net2.complete_add_op(True) - net.append_op(net2) - net.complete_add_op(True) - - expected = ''' -Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}. - Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}. - Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. - Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. - Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}. - Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}. -''' - self.assertEqual(expected, "\n" + str(net)) - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..83d22fd799eea55eedb58f93421b275985edb50b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py @@ -0,0 +1,652 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
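The parallel-executor tests in the new file below feed data either through feed_dict or through a RecordIO file reader. Condensed from simple_fc_net, the reader path looks like this (./mnist.recordio is produced in TestMNIST.setUpClass; the snippet builds onto the default main program):

import paddle.fluid as fluid

reader = fluid.layers.open_files(
    filenames=['./mnist.recordio'],
    shapes=[[-1, 784], [-1, 1]],
    lod_levels=[0, 0],
    dtypes=['float32', 'int64'],
    thread_num=1,
    for_parallel=True)
reader = fluid.layers.io.double_buffer(reader)
img, label = fluid.layers.read_file(reader)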
+ +import numpy +import unittest + +import paddle.fluid as fluid +import paddle +import paddle.dataset.mnist as mnist +import paddle.dataset.wmt16 as wmt16 + + +def simple_fc_net(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_files( + filenames=['./mnist.recordio'], + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + thread_num=1, + for_parallel=True) + reader = fluid.layers.io.double_buffer(reader) + img, label = fluid.layers.read_file(reader) + hidden = img + for _ in xrange(4): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +def fc_with_batchnorm(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_files( + filenames=['mnist.recordio'], + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + thread_num=1, + for_parallel=True) + reader = fluid.layers.io.double_buffer(reader) + img, label = fluid.layers.read_file(reader) + + hidden = img + for _ in xrange(1): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + + hidden = fluid.layers.batch_norm(input=hidden) + + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +def squeeze_excitation(input, num_channels, reduction_ratio): + # pool = fluid.layers.pool2d( + # input=input, pool_size=0, pool_type='avg', global_pooling=True) + conv = input + shape = conv.shape + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) + pool = fluid.layers.reduce_mean(input=reshape, dim=2) + + squeeze = fluid.layers.fc(input=pool, + size=num_channels / reduction_ratio, + act='relu') + excitation = fluid.layers.fc(input=squeeze, + size=num_channels, + act='sigmoid') + scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) + return scale + + +def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, + act=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) / 2, + groups=groups, + act=None, + bias_attr=False) + return fluid.layers.batch_norm(input=conv, act=act, momentum=0.1) + + +def shortcut(input, ch_out, stride): + ch_in = input.shape[1] + if ch_in != ch_out: + if stride == 1: + filter_size = 1 + else: + filter_size = 3 + return conv_bn_layer(input, ch_out, filter_size, stride) + else: + return input + + +def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): + # The number of first 1x1 convolutional channels for each bottleneck build block + # was halved to reduce the compution cost. 
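+    # Concretely, with num_filters=128 in the first stage: conv0 is a 1x1
+    # conv with 128 channels (the halved width noted above), conv1 is a
+    # grouped 3x3 conv and conv2 a 1x1 conv with num_filters * 2 = 256
+    # channels, squeeze-excitation reweights those 256 channels, and the
+    # shortcut is widened to 256 so the elementwise add matches.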
+ conv0 = conv_bn_layer( + input=input, num_filters=num_filters, filter_size=1, act='relu') + conv1 = conv_bn_layer( + input=conv0, + num_filters=num_filters * 2, + filter_size=3, + stride=stride, + groups=cardinality, + act='relu') + conv2 = conv_bn_layer( + input=conv1, num_filters=num_filters * 2, filter_size=1, act=None) + scale = squeeze_excitation( + input=conv2, + num_channels=num_filters * 2, + reduction_ratio=reduction_ratio) + + short = shortcut(input, num_filters * 2, stride) + + return fluid.layers.elementwise_add(x=short, y=scale, act='relu') + + +def SE_ResNeXt50Small(batch_size=2, use_feed=False): + assert not use_feed, "SE_ResNeXt doesn't support feed yet" + + img = fluid.layers.fill_constant( + shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0) + label = fluid.layers.fill_constant( + shape=[batch_size, 1], dtype='int64', value=0.0) + + conv = conv_bn_layer( + input=img, num_filters=16, filter_size=3, stride=2, act='relu') + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu') + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu') + conv = fluid.layers.pool2d( + input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') + + cardinality = 32 + reduction_ratio = 16 + depth = [3, 4, 6, 3] + num_filters = [128, 256, 512, 1024] + + for block in range(len(depth)): + for i in range(depth[block]): + conv = bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio) + + shape = conv.shape + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) + pool = fluid.layers.reduce_mean(input=reshape, dim=2) + dropout = fluid.layers.dropout(x=pool, dropout_prob=0.2) + # Classifier layer: + prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +import time + + +class TestParallelExecutorBase(unittest.TestCase): + def check_network_convergence(self, + method, + memory_opt=True, + iter=10, + batch_size=None, + allow_op_delay=False, + feed_dict={}): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + loss = method(use_feed=len(feed_dict) > 0) + adam = fluid.optimizer.Adam() + adam.minimize(loss) + if memory_opt: + fluid.memory_optimize(main) + + place = fluid.CUDAPlace(0) + startup_exe = fluid.Executor(place) + startup_exe.run(startup) + + exe = fluid.ParallelExecutor(True, loss_name=loss.name) + if batch_size is not None: + batch_size *= fluid.core.get_cuda_device_count() + begin = time.time() + first_loss, = exe.run([loss.name], feed_dict=feed_dict) + first_loss = numpy.array(first_loss) + + for i in xrange(iter): + exe.run([], feed_dict=feed_dict) + + last_loss, = exe.run([loss.name], feed_dict=feed_dict) + end = time.time() + + if batch_size is not None: + print "%.4f Instance per second" % ( + (batch_size * iter + 2) / (end - begin)) + + last_loss = numpy.array(last_loss) + + print first_loss, last_loss + # self.assertGreater(first_loss[0], last_loss[0]) + + +class TestMNIST(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + # Convert mnist to recordio file + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(mnist.train(), batch_size=4) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + 
name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + fluid.recordio_writer.convert_reader_to_recordio_file( + './mnist.recordio', reader, feeder) + + def test_simple_fc(self): + self.check_network_convergence(simple_fc_net) + self.check_network_convergence(simple_fc_net, allow_op_delay=True) + + img = numpy.zeros(shape=[32, 784], dtype='float32') + label = numpy.ones(shape=[32, 1], dtype='int64') + self.check_network_convergence( + simple_fc_net, feed_dict={"image": img, + "label": label}) + + def test_batchnorm_fc(self): + self.check_network_convergence(fc_with_batchnorm) + img = numpy.zeros(shape=[32, 784], dtype='float32') + label = numpy.ones(shape=[32, 1], dtype='int64') + self.check_network_convergence( + fc_with_batchnorm, feed_dict={"image": img, + "label": label}) + + +class TestResnet(TestParallelExecutorBase): + # @classmethod + # def setUpClass(cls): + # # import os + # # if os.path.exists('./flowers.recordio'): + # # return + # with fluid.program_guard(fluid.Program(), fluid.Program()): + # reader = paddle.batch(flowers.train(), batch_size=4) + # feeder = fluid.DataFeeder( + # feed_list=[ + # fluid.layers.data( + # name='image', shape=[3, 224, 224]), + # fluid.layers.data( + # name='label', shape=[1], dtype='int64'), + # ], + # place=fluid.CPUPlace()) + # fluid.recordio_writer.convert_reader_to_recordio_file( + # "./flowers.recordio", reader, feeder, compressor=fluid.core.RecordIOWriter.Compressor.NoCompress) + + def test_resnet(self): + import functools + batch_size = 2 + self.check_network_convergence( + functools.partial( + SE_ResNeXt50Small, batch_size=batch_size), + iter=20, + batch_size=batch_size) + + +class ModelHyperParams(object): + # Dictionary size for the source and target languages. This model directly + # uses paddle.dataset.wmt16, in which the <bos>, <eos> and <unk> tokens have + # already been added, but the <pad> token has not. Transformer requires the + # sequences in a mini-batch to be padded to the same length, so a <pad> token + # is added into the original dictionary in paddle.dataset.wmt16. + + # size of the source word dictionary. + src_vocab_size = 10000 + # index for the <pad> token in the source language. + src_pad_idx = src_vocab_size + + # size of the target word dictionary. + trg_vocab_size = 10000 + # index for the <pad> token in the target language. + trg_pad_idx = trg_vocab_size + + # position value corresponding to the <pad> token. + pos_pad_idx = 0 + + # max length of sequences. 1 is added to it to include the position + # padding token for position encoding. + max_length = 50 + + # the dimension for word embeddings, which is also the last dimension of + # the input and output of multi-head attention, position-wise feed-forward + # networks, encoder and decoder. + + d_model = 512 + # size of the hidden layer in position-wise feed-forward networks. + d_inner_hid = 1024 + # the dimension that keys are projected to for dot-product attention. + d_key = 64 + # the dimension that values are projected to for dot-product attention. + d_value = 64 + # number of heads used in multi-head attention. + n_head = 8 + # number of sub-layers to be stacked in the encoder and decoder. + n_layer = 6 + # dropout rate used by all dropout layers. + dropout = 0.1 + + +import numpy as np + + +def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): + """ + Pad the instances to the max sequence length in batch, and generate the + corresponding position data and attention bias. Then return the padded + data and biases as a list of numpy arrays.
+ """ + + def __pad_batch_data(insts, + pad_idx, + is_target=False, + return_pos=True, + return_attn_bias=True, + return_max_len=True): + """ + Pad the instances to the max sequence length in batch, and generate the + corresponding position data and attention bias. + """ + return_list = [] + max_len = max(len(inst) for inst in insts) + inst_data = np.array( + [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) + return_list += [inst_data.astype("int64").reshape([-1, 1])] + if return_pos: + inst_pos = np.array([[ + pos_i + 1 if w_i != pad_idx else 0 + for pos_i, w_i in enumerate(inst) + ] for inst in inst_data]) + + return_list += [inst_pos.astype("int64").reshape([-1, 1])] + if return_attn_bias: + if is_target: + # This is used to avoid attention on paddings and subsequent + # words. + slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, + max_len)) + slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( + [-1, 1, max_len, max_len]) + slf_attn_bias_data = np.tile(slf_attn_bias_data, + [1, n_head, 1, 1]) * [-1e9] + else: + # This is used to avoid attention on paddings. + slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * + (max_len - len(inst)) + for inst in insts]) + slf_attn_bias_data = np.tile( + slf_attn_bias_data.reshape([-1, 1, 1, max_len]), + [1, n_head, max_len, 1]) + return_list += [slf_attn_bias_data.astype("float32")] + if return_max_len: + return_list += [max_len] + return return_list if len(return_list) > 1 else return_list[0] + + def data_to_tensor(data_list, name_list, input_dict, place): + assert len(data_list) == len(name_list) + for i in range(len(name_list)): + tensor = fluid.LoDTensor() + tensor.set(data_list[i], place) + input_dict[name_list[i]] = tensor + + src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data( + [inst[0] for inst in insts], src_pad_idx, is_target=False) + trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data( + [inst[1] for inst in insts], trg_pad_idx, is_target=True) + trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], + [1, 1, trg_max_len, 1]).astype("float32") + lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False, + False, False, False) + lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1]) + + return [ + src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, + trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight + ] + + +import transformer_model + + +def transformer(use_feed): + assert not use_feed, "transfomer doesn't support feed yet" + return transformer_model.transformer( + ModelHyperParams.src_vocab_size + 1, + ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, ModelHyperParams.n_head, + ModelHyperParams.d_key, ModelHyperParams.d_value, + ModelHyperParams.d_model, ModelHyperParams.d_inner_hid, + ModelHyperParams.dropout, ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx) + + +class TestTransformer(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + reader = paddle.batch( + wmt16.train(ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size), + batch_size=transformer_model.batch_size) + + with fluid.recordio_writer.create_recordio_writer( + "./wmt16.recordio") as writer: + for batch in reader(): + for tensor in prepare_batch_input( + batch, ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, ModelHyperParams.n_head): + t = fluid.LoDTensor() + t.set(tensor, fluid.CPUPlace()) + 
writer.append_tensor(t) + writer.complete_append_tensor() + + @unittest.skip("transformer is buggy in multi gpu") + def test_main(self): + self.check_network_convergence(transformer) + + +class ParallelExecutorTestingDuringTraining(unittest.TestCase): + def test_parallel_testing(self): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + loss = simple_fc_net(True) + test_program = main.clone(for_test=True) + + opt = fluid.optimizer.SGD(learning_rate=0.001) + opt.minimize(loss) + + batch_size = 32 + image = numpy.random.normal(size=(batch_size, + 784)).astype('float32') + label = numpy.random.randint(0, 10, (batch_size, 1), dtype="int64") + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(startup) + feed_dict = {'image': image, 'label': label} + + train_exe = fluid.ParallelExecutor( + use_cuda=True, loss_name=loss.name, main_program=main) + + test_exe = fluid.ParallelExecutor( + use_cuda=True, + main_program=test_program, + share_vars_from=train_exe) + + for i in xrange(5): + test_loss, = test_exe.run([loss.name], feed_dict=feed_dict) + test_loss = numpy.array(test_loss) + + train_loss, = train_exe.run([loss.name], feed_dict=feed_dict) + train_loss = numpy.array(train_loss) + self.assertTrue( + numpy.allclose( + train_loss, test_loss, atol=1e-8), + "Train loss: " + str(train_loss) + "\n Test loss:" + + str(test_loss)) + + +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) +mark_dict_len = 2 +word_dim = 32 +mark_dim = 5 +hidden_dim = 512 +depth = 8 +mix_hidden_lr = 1e-3 +embedding_name = 'emb' + + +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + **ignored): + # 8 features + predicate_embedding = fluid.layers.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + param_attr='vemb') + + mark_embedding = fluid.layers.embedding( + input=mark, size=[mark_dict_len, mark_dim], dtype='float32') + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + fluid.layers.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers + ] + + hidden_0 = fluid.layers.sums(input=hidden_0_layers) + + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + + # stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + + feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') + ]) + + return feature_out + + +class TestCRFModel(unittest.TestCase): + 
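+    # The test below drives ParallelExecutor with LoD (sequence) inputs: a
+    # DataFeeder on CPUPlace turns each conll05 mini-batch into LoDTensors,
+    # and pe.run(feed_dict=feeder.feed(cur_batch),
+    # fetch_list=[avg_cost.name]) returns the fetched cost, which is
+    # converted back to an ndarray with numpy.array.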
def test_all(self): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1) + feature_out = db_lstm(**locals()) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=1e-1)) + avg_cost = fluid.layers.mean(crf_cost) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=16) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(startup) + + pe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) + + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, + mark, target + ], + place=fluid.CPUPlace()) + + data = train_data() + for i in xrange(10): + cur_batch = next(data) + print map(numpy.array, + pe.run(feed_dict=feeder.feed(cur_batch), + fetch_list=[avg_cost.name]))[0] diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py index c21138c13e6753f9dfcbd7d439269f7cf9a04f23..bcbc02a2baa46b9ab583ecf3006bd3262e6038fd 100644 --- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py @@ -28,7 +28,6 @@ class TestPriorBoxOp(OpTest): self.attrs = { 'min_sizes': self.min_sizes, - 'max_sizes': self.max_sizes, 'aspect_ratios': self.aspect_ratios, 'variances': self.variances, 'flip': self.flip, @@ -37,25 +36,28 @@ class TestPriorBoxOp(OpTest): 'step_h': self.step_h, 'offset': self.offset } + if len(self.max_sizes) > 0: + self.attrs['max_sizes'] = self.max_sizes self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} def test_check_output(self): self.check_output() - def test_check_grad(self): - return - def setUp(self): self.op_type = "prior_box" self.set_data() + def set_max_sizes(self): + max_sizes = [5, 10] + self.max_sizes = np.array(max_sizes).astype('float32').tolist() + def init_test_params(self): - self.layer_w = 4 - self.layer_h = 4 + self.layer_w = 32 + self.layer_h = 32 - self.image_w = 20 - self.image_h = 20 + self.image_w = 40 + self.image_h = 40 self.step_w = float(self.image_w) / float(self.layer_w) self.step_h = float(self.image_h) / float(self.layer_h) @@ -66,8 +68,7 @@ class TestPriorBoxOp(OpTest): self.min_sizes = [2, 4] self.min_sizes = np.array(self.min_sizes).astype('float32').tolist() - self.max_sizes = [5, 10] - self.max_sizes = np.array(self.max_sizes).astype('float32').tolist() + 
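+        # Prior-count arithmetic used further down: with aspect_ratios
+        # [2.0, 3.0] and flip=True the effective ratio set is
+        # [1, 2, 1/2, 3, 1/3], giving 5 * len(min_sizes) = 10 priors per
+        # cell, plus one extra aspect_ratio=1 prior per max_size (2 here,
+        # 0 when set_max_sizes() is overridden to return an empty list).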
self.set_max_sizes() self.aspect_ratios = [2.0, 3.0] self.flip = True self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] @@ -79,7 +80,7 @@ class TestPriorBoxOp(OpTest): self.clip = True self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes) - if len(self.max_sizes) > 1: + if len(self.max_sizes) > 0: self.num_priors += len(self.max_sizes) self.offset = 0.5 @@ -105,35 +106,27 @@ class TestPriorBoxOp(OpTest): idx = 0 for s in range(len(self.min_sizes)): min_size = self.min_sizes[s] - c_w = c_h = min_size / 2. - out_boxes[h, w, idx, :] = [ - (c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h - ] - idx += 1 - - if len(self.max_sizes) > 0: - max_size = self.max_sizes[s] - # second prior: aspect_ratio = 1, - c_w = c_h = math.sqrt(min_size * max_size) / 2 + # rest of priors + for r in range(len(self.real_aspect_ratios)): + ar = self.real_aspect_ratios[r] + c_w = min_size * math.sqrt(ar) / 2 + c_h = (min_size / math.sqrt(ar)) / 2 out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h] idx += 1 - # rest of priors - for r in range(len(self.real_aspect_ratios)): - ar = self.real_aspect_ratios[r] - if math.fabs(ar - 1.) < 1e-6: - continue - c_w = min_size * math.sqrt(ar) / 2 - c_h = (min_size / math.sqrt(ar)) / 2 + if len(self.max_sizes) > 0: + max_size = self.max_sizes[s] + # second prior: aspect_ratio = 1, + c_w = c_h = math.sqrt(min_size * max_size) / 2 out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h] idx += 1 + # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) @@ -144,5 +137,10 @@ class TestPriorBoxOp(OpTest): self.out_var = out_var.astype('float32') +class TestPriorBoxOpWithMaxSize(TestPriorBoxOp): + def set_max_sizes(self): + self.max_sizes = [] + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index 87a2195f0d5c7fd355ea01a3c8f60908b33d4b9d..c51a48239330621d8e008415f81361616467cabf 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -87,57 +87,6 @@ class TestProgram(unittest.TestCase): print(prog) print(prog_restored) - def test_append_backward(self): - prog = Program() - block = prog.global_block() - - mul_x = block.create_var( - dtype="float32", shape=[5, 10], lod_level=0, name="mul.x") - mul_y = block.create_var( - dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") - mul_out = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") - mul_op = block.append_op( - type="mul", - inputs={"X": [mul_x], - "Y": mul_y}, - outputs={"Out": [mul_out]}, - attrs={"x_num_col_dims": 1}) - - add_y = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="add.y") - add_out = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="add.out") - add_op = block.append_op( - type="elementwise_add", - inputs={"X": mul_out, - "Y": add_y}, - outputs={"Out": add_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var( - dtype="float32", shape=[1], lod_level=0, name="mean.out") - block.append_op( - type="mean", inputs={"X": add_out}, outputs={"Out": mean_out}) - - self.assertEqual(mul_op.idx, 0) - self.assertEqual(add_op.idx, 1) - param_to_grad = 
prog.append_backward(mean_out, set()) - - for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out", - "mean.out"): - self.assertEqual(param_to_grad[var_name][0], - grad_var_name(var_name)) - self.assertEqual(param_to_grad[var_name][1], 0) - - expect_ops = [ - "mul", "elementwise_add", "mean", "fill_constant", "mean_grad", - "elementwise_add_grad", "mul_grad" - ] - actual_ops = [] - for op in block.ops: - actual_ops.append(op.type) - self.assertEqual(actual_ops, expect_ops) - def test_program_clone_with_parameter(self): main_program = Program() startup_program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index 309ea2b9b7ede442da3ac897ce8d1a4b9aa68233..3f9059fb5b31cd009c068ccddc9a8938adae5772 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -14,13 +14,14 @@ import unittest import paddle.fluid.core as core +from paddle.fluid.framework import Program class TestOpDesc(unittest.TestCase): def test_op_desc(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) op = block.append_op() self.assertIsNotNone(op) @@ -66,7 +67,7 @@ class TestOpDesc(unittest.TestCase): self.assertEqual(8, len(op.attr_names())) - op.set_block_attr("block_attr", prog.block(0)) + op.set_block_attr("block_attr", program_desc.block(0)) self.assertEqual(0, op.block_attr("block_attr")) mul_op = block.append_op() @@ -87,20 +88,20 @@ class TestProgramDesc(unittest.TestCase): del program_desc def test_append_block(self): - prog_desc = core.ProgramDesc() - self.assertIsNotNone(prog_desc) - block_root = prog_desc.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block_root = program_desc.block(0) self.assertIsNotNone(block_root) self.assertEqual(block_root.id, 0) - block1 = prog_desc.append_block(block_root) - block2 = prog_desc.append_block(block1) + block1 = program_desc.append_block(block_root) + block2 = program_desc.append_block(block1) self.assertIsNotNone(block1) self.assertEqual(block1.id, block2.parent) self.assertEqual(block_root.id, block1.parent) - block3 = prog_desc.append_block(block_root) + block3 = program_desc.append_block(block_root) self.assertEqual(block3.parent, block_root.id) - self.assertEqual(prog_desc.block(1).id, 1) - self.assertEqual(4, prog_desc.num_blocks()) + self.assertEqual(program_desc.block(1).id, 1) + self.assertEqual(4, program_desc.num_blocks()) class TestVarDesc(unittest.TestCase): @@ -161,9 +162,9 @@ class TestVarDesc(unittest.TestCase): class TestBlockDesc(unittest.TestCase): def test_add_var(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) var1 = block.var("var1") var2 = block.var("var2") @@ -174,9 +175,9 @@ class TestBlockDesc(unittest.TestCase): self.assertEqual(var2_re, var2) def test_add_op(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) op1 = block.append_op() op2 = block.append_op() @@ -186,6 +187,28 @@ class TestBlockDesc(unittest.TestCase): 
all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op1, op2]) + def test_remove_op(self): + program = Program() + program_desc = program.desc + self.assertIsNotNone(program_desc) + block = program_desc.block(0) + self.assertIsNotNone(block) + + op0 = block.append_op() + op1 = block.append_op() + op2 = block.append_op() + op0.set_type("test") + op1.set_type("test") + op2.set_type("test") + + block.remove_op(1, 2) + program.sync_with_cpp() + + all_ops = [] + for idx in xrange(0, block.op_size()): + all_ops.append(block.op(idx)) + self.assertEqual(all_ops, [op0, op2]) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py index 24a0074d9b9621d902d12eb8cb29d9b65be22ed3..7c8e7f634fdd3ee3f056a95df774402a7c29e906 100644 --- a/python/paddle/fluid/tests/unittests/test_recordio_reader.py +++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py @@ -65,8 +65,13 @@ class TestRecordIO(unittest.TestCase): # train a pass batch_id = 0 - while not data_file.eof(): - tmp, = exe.run(fetch_list=[avg_loss]) + while True: + try: + tmp, = exe.run(fetch_list=[avg_loss]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break + avg_loss_np.append(tmp) batch_id += 1 data_file.reset() @@ -74,8 +79,8 @@ class TestRecordIO(unittest.TestCase): self.assertLess(avg_loss_np[-1], avg_loss_np[0]) def test_shuffle_reader(self): - self.test_main(decorator_callback=lambda reader: fluid.layers.create_shuffle_reader(reader, buffer_size=200)) + self.test_main(decorator_callback=lambda reader: fluid.layers.io.shuffle(reader, buffer_size=200)) def test_double_buffer_reader(self): - self.test_main(decorator_callback=lambda reader: fluid.layers.create_double_buffer_reader(reader, + self.test_main(decorator_callback=lambda reader: fluid.layers.io.double_buffer(reader, place='cuda:0' if fluid.core.is_compiled_with_cuda() else 'cpu')) diff --git a/python/paddle/fluid/tests/unittests/test_recv_op.py b/python/paddle/fluid/tests/unittests/test_recv_op.py index 854238c6279528d8f3adf173140a47e233134f43..2ebceca7e4b7b824194d94180462870e6cfe6d21 100644 --- a/python/paddle/fluid/tests/unittests/test_recv_op.py +++ b/python/paddle/fluid/tests/unittests/test_recv_op.py @@ -23,7 +23,7 @@ import time class TestRecvOp(unittest.TestCase): - def test_send(self): + def no_test_send(self): # Run init_serv in a thread place = fluid.CPUPlace() p = Process(target=self.init_serv, args=(place, )) diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index 11f35c74d41146118525a5efa6c211d528e255fe..f51b5a7e9907294a5b91c920a363830d8b9a7137 100644 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -14,15 +14,19 @@ import unittest import numpy as np + from op_test import OpTest class TestReshapeOp(OpTest): def setUp(self): + ori_shape = (2, 25) + new_shape = (5, 10) + self.op_type = "reshape" - self.inputs = {'X': np.random.random((10, 20)).astype("float32")} - self.attrs = {'shape': [10 * 20]} - self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape, "inplace": False} + self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): self.check_output() @@ -31,12 +35,33 @@ class 
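The new test_remove_op above depends on remove_op(begin, end) treating its arguments as a half-open index range [begin, end), which is why remove_op(1, 2) deletes only op1 and leaves [op0, op2]. Plain Python slicing has the same semantics, as a quick sanity check:

ops = ["op0", "op1", "op2"]
del ops[1:2]                       # half-open range: only index 1 is removed
assert ops == ["op0", "op2"]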
TestReshapeOp(OpTest): self.check_grad(["X"], "Out") -class TestReshapeOpDimInfer(OpTest): +class TestReshapeOpDimInfer1(OpTest): def setUp(self): + ori_shape = (5, 10) + new_shape = (5, -1, 5) + self.op_type = "reshape" - self.inputs = {'X': np.random.random((10, 20)).astype("float32")} - self.attrs = {'shape': [4, -1, 5]} - self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape, "inplace": False} + self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +class TestReshapeOpDimInfer2(OpTest): + def setUp(self): + ori_shape = (2, 2, 6) + new_shape = (2, 0, 3, -1) + infered_shape = (2, 2, 3, -1) + + self.op_type = "reshape" + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape, "inplace": False} + self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} def test_check_output(self): self.check_output() @@ -47,10 +72,30 @@ class TestReshapeOpDimInfer(OpTest): class TestReshapeOpInplace(OpTest): def setUp(self): + ori_shape = (2, 25) + new_shape = (5, 10) + + self.op_type = "reshape" + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape} + self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +class TestReshapeOpDimInferInplace1(OpTest): + def setUp(self): + ori_shape = (5, 10) + new_shape = (5, -1, 5) + self.op_type = "reshape" - self.inputs = {'X': np.random.random((10, 20)).astype("float32")} - self.attrs = {'shape': [10 * 20], 'inplace': True} - self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape} + self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): self.check_output() @@ -59,12 +104,38 @@ class TestReshapeOpInplace(OpTest): self.check_grad(["X"], "Out") -class TestReshapeOpDimInferInplace(OpTest): +class TestReshapeOpDimInferInplace2(OpTest): def setUp(self): + ori_shape = (2, 2, 6) + new_shape = (2, 0, 3, -1) + infered_shape = (2, 2, 3, -1) + + self.op_type = "reshape" + self.inputs = {"X": np.random.random(ori_shape).astype("float32")} + self.attrs = {"shape": new_shape} + self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +class TestReshapeOpWithInputShape(OpTest): + def setUp(self): + ori_shape = (6, 5) + new_shape = (0, -1, 5) + actual_shape = (2, 3, 5) + self.op_type = "reshape" - self.inputs = {'X': np.random.random((10, 20)).astype("float32")} - self.attrs = {'shape': [4, -1, 5], 'inplace': True} - self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + self.inputs = { + "X": np.random.random(ori_shape).astype("float32"), + "Shape": np.array( + actual_shape, dtype="int32") + } + self.attrs = {"shape": new_shape} + self.outputs = {"Out": self.inputs["X"].reshape(actual_shape)} def test_check_output(self): self.check_output() @@ -73,5 +144,5 @@ class TestReshapeOpDimInferInplace(OpTest): self.check_grad(["X"], "Out") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff 
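The reshape cases above exercise the two special values allowed in the shape attribute: 0 copies the corresponding input dimension, and -1 is inferred from the remaining element count. A minimal sketch of that inference rule (pure Python, illustrative rather than the op's actual implementation):

def infer_reshape(ori_shape, new_shape):
    # 0 copies the input dimension at the same axis; -1 marks the axis to infer
    out = [ori_shape[i] if d == 0 else d for i, d in enumerate(new_shape)]
    if -1 in out:
        known = 1
        for d in out:
            if d != -1:
                known *= d
        total = 1
        for d in ori_shape:
            total *= d
        out[out.index(-1)] = total // known   # fill in the inferred axis
    return tuple(out)

# matches TestReshapeOpDimInfer2: (2, 2, 6) reshaped with (2, 0, 3, -1)
assert infer_reshape((2, 2, 6), (2, 0, 3, -1)) == (2, 2, 3, 2)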
--git a/python/paddle/fluid/tests/unittests/test_seq_pool.py b/python/paddle/fluid/tests/unittests/test_seq_pool.py index 04884757216bc29a96eb97a6db403c3925472294..2e48ef0e880839f6d5b4e515a174f427a35e7e6f 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_pool.py +++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py @@ -49,6 +49,61 @@ class TestSeqAvgPool(OpTest): self.check_grad(["X"], "Out") +class TestSeqSumPool(TestSeqAvgPool): + def compute(self, x, lod, out): + self.attrs = {'pooltype': "SUM"} + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + out[i] = sub_x.sum(axis=0) + + +class TestSeqMaxPool(TestSeqAvgPool): + def set_data(self): + self.op_type = 'sequence_pool' + x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') + lod = [[0, 4, 5, 8, 13]] + for i in range(4): + l = lod[0][i + 1] - lod[0][i] + x[lod[0][i] + np.random.randint(l), :] += 2.0 + + self.inputs = {'X': (x, lod)} + + out = np.zeros((4, 23)).astype('float32') + self.outputs = {'Out': out} + return x, lod, out + + def compute(self, x, lod, out): + self.attrs = {'pooltype': "MAX"} + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + out[i] = np.amax(sub_x, axis=0) + + +class TestSeqSqrtPool(TestSeqAvgPool): + def compute(self, x, lod, out): + self.attrs = {'pooltype': "SQRT"} + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + len = lod[0][i + 1] - lod[0][i] + out[i] = sub_x.sum(axis=0) / np.sqrt(len) + + +class TestSeqLastPool(TestSeqAvgPool): + def compute(self, x, lod, out): + self.attrs = {'pooltype': "LAST"} + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + out[i] = sub_x[-1, :] + + +class TestSeqFirstPool(TestSeqAvgPool): + def compute(self, x, lod, out): + self.attrs = {'pooltype': "FIRST"} + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + out[i] = sub_x[0, :] + + class TestSeqAvgPool2D(TestSeqAvgPool): def set_data(self): self.op_type = 'sequence_pool' @@ -68,14 +123,6 @@ class TestSeqAvgPool2D(TestSeqAvgPool): out[i] = np.reshape(sub_x.mean(axis=0), (3, 17)) -class TestSeqSumPool(TestSeqAvgPool): - def compute(self, x, lod, out): - self.attrs = {'pooltype': "SUM"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - out[i] = sub_x.sum(axis=0) - - class TestSeqSumPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): self.attrs = {'pooltype': "SUM"} @@ -84,15 +131,6 @@ class TestSeqSumPool2D(TestSeqAvgPool2D): out[i] = np.reshape(sub_x.sum(axis=0), (3, 17)) -class TestSeqSqrtPool(TestSeqAvgPool): - def compute(self, x, lod, out): - self.attrs = {'pooltype': "SQRT"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - len = lod[0][i + 1] - lod[0][i] - out[i] = sub_x.sum(axis=0) / np.sqrt(len) - - class TestSeqSqrtPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): self.attrs = {'pooltype': "SQRT"} @@ -108,28 +146,6 @@ class TestSeqSqrtPool2D(TestSeqAvgPool2D): self.check_grad(["X"], "Out", max_relative_error=0.06) -class TestSeqMaxPool(TestSeqAvgPool): - def set_data(self): - self.op_type = 'sequence_pool' - x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') - lod = [[0, 4, 5, 8, 13]] - for i in range(4): - l = lod[0][i + 1] - lod[0][i] - x[lod[0][i] + np.random.randint(l), :] += 2.0 - - self.inputs = {'X': (x, lod)} - - out = np.zeros((4, 23)).astype('float32') - self.outputs = {'Out': out} - return x, lod, out - - def compute(self, x, lod, out): - self.attrs = {'pooltype': "MAX"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - out[i] = np.amax(sub_x, axis=0) - - class 
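All the pooling variants consolidated above share one access pattern: the level-0 LoD offsets delimit the sequences, and each pooltype reduces one slice. A compact NumPy reference, assuming a single-level LoD as in these tests:

import numpy as np

def sequence_pool(x, lod0, pooltype):
    # lod0 holds offsets, e.g. [0, 4, 5, 8, 13]; sequence i is x[lod0[i]:lod0[i + 1]]
    reduce_fn = {
        "AVERAGE": lambda s: s.mean(axis=0),
        "SUM": lambda s: s.sum(axis=0),
        "SQRT": lambda s: s.sum(axis=0) / np.sqrt(s.shape[0]),
        "MAX": lambda s: s.max(axis=0),
        "LAST": lambda s: s[-1],
        "FIRST": lambda s: s[0],
    }[pooltype]
    return np.stack([reduce_fn(x[lod0[i]:lod0[i + 1]])
                     for i in range(len(lod0) - 1)])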
TestSeqMaxPool2D(TestSeqAvgPool2D): def set_data(self): self.op_type = 'sequence_pool' @@ -151,14 +167,6 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D): out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) -class TestSeqLastPool(TestSeqAvgPool): - def compute(self, x, lod, out): - self.attrs = {'pooltype': "LAST"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - out[i] = sub_x[-1, :] - - class TestSeqLastPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): self.attrs = {'pooltype': "LAST"} @@ -167,14 +175,6 @@ class TestSeqLastPool2D(TestSeqAvgPool2D): out[i] = np.reshape(sub_x[-1, :], (3, 17)) -class TestSeqFirstPool(TestSeqAvgPool): - def compute(self, x, lod, out): - self.attrs = {'pooltype': "FIRST"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - out[i] = sub_x[0, :] - - class TestSeqFirstPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): self.attrs = {'pooltype': "FIRST"} diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py index 7feb509c4d6f5768552fc2515081f7e68f420967..4c8ec1426c6e103498af544ea5928ec630707d46 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py @@ -47,8 +47,10 @@ class TestSequenceExpand(OpTest): x_len = x_idx[i] - x_idx[i - 1] if repeat_num > 0: x_sub = x_data[x_idx[i - 1]:x_idx[i], :] - x_sub = np.repeat(x_sub, repeat_num, axis=0) - out = np.vstack((out, x_sub)) + stacked_x_sub = x_sub + for r in range(repeat_num - 1): + stacked_x_sub = np.vstack((stacked_x_sub, x_sub)) + out = np.vstack((out, stacked_x_sub)) if x_lod is not None: for j in xrange(repeat_num): out_lod[0].append(out_lod[0][-1] + x_len) @@ -101,11 +103,11 @@ class TestSequenceExpandCase3(TestSequenceExpand): class TestSequenceExpandCase4(TestSequenceExpand): def set_data(self): - data = [0.1, 0.3, 0.2, 0.15, 0.25, 0.2, 0.15, 0.25, 0.1, 0.3] + data = np.random.uniform(0.1, 1, [5 * 2, 1]) x_data = np.array(data).reshape([5, 2]).astype('float32') x_lod = [[0, 2, 5]] - y_data = np.random.uniform(0.1, 1, [2, 1]).astype('float32') - y_lod = [[0, 1, 2], [0, 1, 2]] + y_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32') + y_lod = [[0, 1, 3], [0, 1, 3]] self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index c498b23db12cd83304f4c3a3d1f15bd68ad4f0b6..3126293f9d8e52daa866be5fc1533648a33f3363 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -97,5 +97,72 @@ class TestSparseSGDOp(unittest.TestCase): self.check_with_place(place) +class TestSGDOpOptimizeSelectedRows(unittest.TestCase): + def check_with_place(self, place): + scope = core.Scope() + + row_width = 12 + # create and initialize Grad Variable + grad_height = 10 + grad_rows = [0, 4, 7] + + grad_selected_rows = scope.var('Grad').get_selected_rows() + grad_selected_rows.set_height(grad_height) + grad_selected_rows.set_rows(grad_rows) + grad_array = np.ones((len(grad_rows), row_width)).astype("float32") + grad_array[0, 0] = 2.0 + grad_array[2, 8] = 4.0 + + grad_tensor = grad_selected_rows.get_tensor() + grad_tensor.set(grad_array, place) + + # create and initialize Param Variable + # create and initialize W Variable + param_rows = [0, 1, 2, 3, 4, 5, 6, 7] + + # init Param + w_selected_rows = scope.var('Param').get_selected_rows() + 
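The sequence_expand change above, replacing np.repeat with an explicit vstack loop, is not cosmetic: for a sub-sequence with more than one row the two produce different orderings. np.repeat duplicates each row in place, while the loop tiles the whole block, which is evidently what the operator's reference output needs:

import numpy as np

x_sub = np.array([[1], [2]])
print(np.repeat(x_sub, 2, axis=0).ravel())   # [1 1 2 2] - per-row duplication
print(np.vstack([x_sub, x_sub]).ravel())     # [1 2 1 2] - whole-block tiling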
w_selected_rows.set_height(len(param_rows)) + w_selected_rows.set_rows(param_rows) + w_array = np.ones((len(param_rows), row_width)).astype("float32") + for i in range(len(param_rows)): + w_array[i] *= i + w_tensor = w_selected_rows.get_tensor() + w_tensor.set(w_array, place) + + w_before_optimize = np.array(w_tensor) + + # create and initialize LearningRate Variable + lr_value = 0.1 + lr = scope.var('LearningRate').get_tensor() + lr_array = np.full((1), lr_value).astype("float32") + lr.set(lr_array, place) + + # optimize with Python + w_after_optimize = np.copy(w_before_optimize) + for index, id in enumerate(grad_rows): + w_after_optimize[id] = w_before_optimize[ + id] - lr_value * grad_array[index] + + # create and run sgd operator + sgd_op = Operator( + "sgd", + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate') + sgd_op.run(scope, place) + + # get and compare result + result_array = np.array(w_tensor) + assert (result_array == w_after_optimize).all() + + def test_sparse_parameter_sgd(self): + places = [core.CPUPlace()] + # do not support GPU kernel currently + for place in places: + self.check_with_place(place) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 33d60c7e31ce0817ad26ea1c1c974339936052d3..279f3073f73d1c36f54bb901d92441a7403ac23f 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -68,6 +68,17 @@ class TestSoftmaxCUDNNOp(TestSoftmaxOp): self.use_cudnn = True +class TestSoftmaxFP16Op(TestSoftmaxOp): + def init_kernel_type(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + + class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): def init_kernel_type(self): self.use_cudnn = True diff --git a/python/paddle/fluid/tests/unittests/test_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_target_assign_op.py old mode 100755 new mode 100644 diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 75ff85a55fc4fd504ecd032e17f7e189c17192fb..346a949b6e7c96b5535f5e65ddbada11e110a0a7 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -15,6 +15,16 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core +from paddle.fluid.op import Operator + + +def output_hist(out): + hist, _ = np.histogram(out, range=(-5, 10)) + hist = hist.astype("float32") + hist /= float(out.size) + prob = 0.1 * np.ones((10)) + return hist, prob class TestUniformRandomOp(OpTest): @@ -33,11 +43,37 @@ class TestUniformRandomOp(OpTest): self.check_output_customized(self.verify_output) def verify_output(self, outs): - tensor = outs[0] - hist, _ = np.histogram(outs[0], range=(-5, 10)) - hist = hist.astype("float32") - hist /= float(outs[0].size) - prob = 0.1 * np.ones((10)) + hist, prob = output_hist(np.array(outs[0])) + self.assertTrue( + np.allclose( + hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) + + +class TestUniformRandomOpSelectedRows(unittest.TestCase): + def get_places(self): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(core.CUDAPlace(0)) + return 
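This sparse SGD test and the uniform_random SelectedRows test below manipulate the same structure: a SelectedRows value stores only the touched rows plus their dense indices. An illustrative helper (hypothetical, for intuition only) showing the dense tensor it stands for:

import numpy as np

def selected_rows_to_dense(height, rows, values):
    # rows[i] is the dense index of stored row values[i]; untouched rows stay zero
    dense = np.zeros((height, values.shape[1]), dtype=values.dtype)
    for i, r in enumerate(rows):
        dense[r] = values[i]
    return dense

# e.g. the gradient above: height=10, rows=[0, 4, 7], values of shape (3, 12)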
places + + def test_check_output(self): + for place in self.get_places(): + self.check_with_place(place) + + def check_with_place(self, place): + scope = core.Scope() + out = scope.var("X").get_selected_rows() + + op = Operator( + "uniform_random", + Out="X", + shape=[4, 784], + min=-5.0, + max=10.0, + seed=10) + op.run(scope, place) + self.assertEqual(out.get_tensor().shape(), [4, 784]) + hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) diff --git a/python/paddle/fluid/tests/unittests/transformer_model.py b/python/paddle/fluid/tests/unittests/transformer_model.py new file mode 100644 index 0000000000000000000000000000000000000000..c62792face3c353db1f2e3c77eaf4bd32fbded69 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/transformer_model.py @@ -0,0 +1,487 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import partial +import numpy as np + +import paddle.fluid as fluid +import paddle.fluid.layers as layers + +pos_enc_param_names = ( + "src_pos_enc_table", + "trg_pos_enc_table", ) + +batch_size = 64 + + +def position_encoding_init(n_position, d_pos_vec): + """ + Generate the initial values for the sinusoid position encoding table. + """ + position_enc = np.array([[ + pos / np.power(10000, 2 * (j // 2) / d_pos_vec) + for j in range(d_pos_vec) + ] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)]) + position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i + position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1 + return position_enc.astype("float32") + + +def multi_head_attention(queries, + keys, + values, + attn_bias, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.): + """ + Multi-Head Attention. Note that attn_bias is added to the logit before + computing softmax activation to mask certain selected positions so that + they will not be considered in the attention weights. + """ + if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): + raise ValueError( + "Inputs: queries, keys and values should all be 3-D tensors.") + + def __compute_qkv(queries, keys, values, n_head, d_key, d_value): + """ + Add linear projection to queries, keys, and values. 
+ """ + q = layers.fc(input=queries, + size=d_key * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, + fan_in=d_model * d_key, + fan_out=n_head * d_key), + bias_attr=False, + num_flatten_dims=2) + k = layers.fc(input=keys, + size=d_key * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, + fan_in=d_model * d_key, + fan_out=n_head * d_key), + bias_attr=False, + num_flatten_dims=2) + v = layers.fc(input=values, + size=d_value * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, + fan_in=d_model * d_value, + fan_out=n_head * d_value), + bias_attr=False, + num_flatten_dims=2) + return q, k, v + + def __split_heads(x, n_head): + """ + Reshape the last dimension of input tensor x so that it becomes two + dimensions and then transpose. Specifically, input a tensor with shape + [bs, max_sequence_length, n_head * hidden_dim] then output a tensor + with shape [bs, n_head, max_sequence_length, hidden_dim]. + """ + if n_head == 1: + return x + + hidden_size = x.shape[-1] + # FIXME(guosheng): Decouple the program desc with batch_size. + reshaped = layers.reshape( + x=x, shape=[batch_size, -1, n_head, hidden_size // n_head]) + + # permute the dimensions into: + # [batch_size, n_head, max_sequence_len, hidden_size_per_head] + return layers.transpose(x=reshaped, perm=[0, 2, 1, 3]) + + def __combine_heads(x): + """ + Transpose and then reshape the last two dimensions of input tensor x + so that it becomes one dimension, which is reverse to __split_heads. + """ + if len(x.shape) == 3: return x + if len(x.shape) != 4: + raise ValueError("Input(x) should be a 4-D Tensor.") + + trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) + # FIXME(guosheng): Decouple the program desc with batch_size. + return layers.reshape( + x=trans_x, + shape=map(int, + [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])) + + def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate): + """ + Scaled Dot-Product Attention + """ + + # FIXME(guosheng): Optimize the shape in reshape_op or softmax_op. + + # The current implementation of softmax_op only supports 2D tensors, + # so it cannot be directly used here. + # Reshaping does not help either: the shape of product inferred at + # compile-time is not the actual shape at run-time, so it cannot be + # used to set the attribute of reshape_op. + # So the softmax is defined here as a temporary solution. + + def __softmax(x, eps=1e-9): + exp_out = layers.exp(x=x) + sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False) + return layers.elementwise_div(x=exp_out, y=sum_out, axis=0) + + scaled_q = layers.scale(x=q, scale=d_model**-0.5) + product = layers.matmul(x=scaled_q, y=k, transpose_y=True) + weights = __softmax(layers.elementwise_add(x=product, y=attn_bias)) + if dropout_rate: + weights = layers.dropout( + weights, dropout_prob=dropout_rate, is_test=False) + out = layers.matmul(weights, v) + return out + + q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value) + + q = __split_heads(q, n_head) + k = __split_heads(k, n_head) + v = __split_heads(v, n_head) + + ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model, + dropout_rate) + + out = __combine_heads(ctx_multiheads) + + # Project back to the model size. + proj_out = layers.fc(input=out, + size=d_model, + param_attr=fluid.initializer.Xavier(uniform=False), + bias_attr=False, + num_flatten_dims=2) + return proj_out + + +def positionwise_feed_forward(x, d_inner_hid, d_hid): + """ + Position-wise Feed-Forward Networks. 
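The attention math above reduces to a few lines of NumPy. A reference sketch of scaled_dot_product_attention once the heads are split (note the in-graph __softmax skips the usual max-subtraction, which is included here for numerical stability):

import numpy as np

def ref_attention(q, k, v, attn_bias, d_model):
    # q, k, v: [batch, n_head, seq_len, dim]; attn_bias broadcasts over the logits
    logits = np.matmul(q * d_model ** -0.5, k.transpose(0, 1, 3, 2)) + attn_bias
    logits -= logits.max(axis=-1, keepdims=True)   # numerical stability
    weights = np.exp(logits)
    weights /= weights.sum(axis=-1, keepdims=True)
    return np.matmul(weights, v)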
+ This module consists of two linear transformations with a ReLU activation + in between, which is applied to each position separately and identically. + """ + hidden = layers.fc(input=x, + size=d_inner_hid, + num_flatten_dims=2, + param_attr=fluid.initializer.Uniform( + low=-(d_hid**-0.5), high=(d_hid**-0.5)), + act="relu") + out = layers.fc(input=hidden, + size=d_hid, + num_flatten_dims=2, + param_attr=fluid.initializer.Uniform( + low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5))) + return out + + +def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.): + """ + Add residual connection, layer normalization and dropout to the out tensor + optionally according to the value of process_cmd. + + This will be used before or after multi-head attention and position-wise + feed-forward networks. + """ + for cmd in process_cmd: + if cmd == "a": # add residual connection + out = out + prev_out if prev_out else out + elif cmd == "n": # add layer normalization + out = layers.layer_norm( + out, + begin_norm_axis=len(out.shape) - 1, + param_attr=fluid.initializer.Constant(1.), + bias_attr=fluid.initializer.Constant(0.)) + elif cmd == "d": # add dropout + if dropout: + out = layers.dropout(out, dropout_prob=dropout, is_test=False) + return out + + +pre_process_layer = partial(pre_post_process_layer, None) +post_process_layer = pre_post_process_layer + + +def prepare_encoder(src_word, + src_pos, + src_vocab_size, + src_emb_dim, + src_pad_idx, + src_max_len, + dropout=0., + pos_pad_idx=0, + pos_enc_param_name=None): + """Add word embeddings and position encodings. + The output tensor has a shape of: + [batch_size, max_src_length_in_batch, d_model]. + + This module is used at the bottom of the encoder stacks. + """ + src_word_emb = layers.embedding( + src_word, + size=[src_vocab_size, src_emb_dim], + padding_idx=src_pad_idx, + param_attr=fluid.initializer.Normal(0., 1.)) + src_pos_enc = layers.embedding( + src_pos, + size=[src_max_len, src_emb_dim], + padding_idx=pos_pad_idx, + param_attr=fluid.ParamAttr( + name=pos_enc_param_name, trainable=False)) + enc_input = src_word_emb + src_pos_enc + + # FIXME(guosheng): Decouple the program desc with batch_size. + enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim]) + return layers.dropout( + enc_input, dropout_prob=dropout, + is_test=False) if dropout else enc_input + + +prepare_encoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[0]) +prepare_decoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[1]) + + +def encoder_layer(enc_input, + attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.): + """The encoder layers that can be stacked to form a deep encoder. + + This module consists of a multi-head (self) attention followed by + a position-wise feed-forward network, both of which are wrapped with + post_process_layer to add residual connection, layer normalization + and dropout. 
+ """ + attn_output = multi_head_attention(enc_input, enc_input, enc_input, + attn_bias, d_key, d_value, d_model, + n_head, dropout_rate) + attn_output = post_process_layer(enc_input, attn_output, "dan", + dropout_rate) + ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model) + return post_process_layer(attn_output, ffd_output, "dan", dropout_rate) + + +def encoder(enc_input, + attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.): + """ + The encoder is composed of a stack of identical layers returned by calling + encoder_layer. + """ + for i in range(n_layer): + enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value, + d_model, d_inner_hid, dropout_rate) + enc_input = enc_output + return enc_output + + +def decoder_layer(dec_input, + enc_output, + slf_attn_bias, + dec_enc_attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.): + """ The layer to be stacked in decoder part. + + The structure of this module is similar to that in the encoder part except + a multi-head attention is added to implement encoder-decoder attention. + """ + slf_attn_output = multi_head_attention( + dec_input, + dec_input, + dec_input, + slf_attn_bias, + d_key, + d_value, + d_model, + n_head, + dropout_rate, ) + slf_attn_output = post_process_layer( + dec_input, + slf_attn_output, + "dan", # residual connection + dropout + layer normalization + dropout_rate, ) + enc_attn_output = multi_head_attention( + slf_attn_output, + enc_output, + enc_output, + dec_enc_attn_bias, + d_key, + d_value, + d_model, + n_head, + dropout_rate, ) + enc_attn_output = post_process_layer( + slf_attn_output, + enc_attn_output, + "dan", # residual connection + dropout + layer normalization + dropout_rate, ) + ffd_output = positionwise_feed_forward( + enc_attn_output, + d_inner_hid, + d_model, ) + dec_output = post_process_layer( + enc_attn_output, + ffd_output, + "dan", # residual connection + dropout + layer normalization + dropout_rate, ) + return dec_output + + +def decoder(dec_input, + enc_output, + dec_slf_attn_bias, + dec_enc_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.): + """ + The decoder is composed of a stack of identical decoder_layer layers. 
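Both encoder_layer and decoder_layer pass the command string "dan" to post_process_layer, and pre_post_process_layer interprets it one character at a time: d applies dropout to the sub-layer output, a adds the residual input, and n layer-normalizes the sum. In effect each sub-layer computes:

# sketch of what post_process_layer(prev, out, "dan", p) performs
# out = layer_norm(prev + dropout(out, p))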
+ """ + for i in range(n_layer): + dec_output = decoder_layer( + dec_input, + enc_output, + dec_slf_attn_bias, + dec_enc_attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, ) + dec_input = dec_output + return dec_output + + +def transformer( + src_vocab_size, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + src_pad_idx, + trg_pad_idx, + pos_pad_idx, ): + file_obj = fluid.layers.open_recordio_file( + filename='./wmt16.recordio', + shapes=[ + [batch_size * max_length, 1], + [batch_size * max_length, 1], + [batch_size * max_length, 1], + [batch_size * max_length, 1], + [batch_size, n_head, max_length, max_length], + [batch_size, n_head, max_length, max_length], + [batch_size, n_head, max_length, max_length], + [batch_size * max_length, 1], + [batch_size * max_length, 1], + ], + dtypes=[ + 'int64', + 'int64', + 'int64', + 'int64', + 'float32', + 'float32', + 'float32', + 'int64', + 'float32', + ], + lod_levels=[0] * 9) + + src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, trg_src_attn_bias, gold, weights = fluid.layers.read_file( + file_obj) + + enc_input = prepare_encoder( + src_word, + src_pos, + src_vocab_size, + d_model, + src_pad_idx, + max_length, + dropout_rate, ) + enc_output = encoder( + enc_input, + src_slf_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, ) + + dec_input = prepare_decoder( + trg_word, + trg_pos, + trg_vocab_size, + d_model, + trg_pad_idx, + max_length, + dropout_rate, ) + dec_output = decoder( + dec_input, + enc_output, + trg_slf_attn_bias, + trg_src_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, ) + + # TODO(guosheng): Share the weight matrix between the embedding layers and + # the pre-softmax linear transformation. + predict = layers.reshape( + x=layers.fc(input=dec_output, + size=trg_vocab_size, + param_attr=fluid.initializer.Xavier(uniform=False), + bias_attr=False, + num_flatten_dims=2), + shape=[-1, trg_vocab_size], + act="softmax") + + cost = layers.cross_entropy(input=predict, label=gold) + weighted_cost = cost * weights + return layers.reduce_sum(weighted_cost) diff --git a/python/paddle/reader/__init__.py b/python/paddle/reader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b059735a924d58714cd88a761eb83143f1192d6 --- /dev/null +++ b/python/paddle/reader/__init__.py @@ -0,0 +1,74 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +At training and testing time, PaddlePaddle programs need to read data. To ease +the users' work to write data reading code, we define that + +- A *reader* is a function that reads data (from file, network, random number + generator, etc) and yields data items. +- A *reader creator* is a function that returns a reader function. 
+- A *reader decorator* is a function, which accepts one or more readers, and + returns a reader. + +- A *batch reader* is a function that reads data (from *reader*, file, network, + random number generator, etc) and yields a batch of data items. + +##################### +Data Reader Interface +##################### + +Indeed, a *data reader* doesn't have to be a function that reads and yields data +items. It can be any function with no parameter that creates an iterable +(anything can be used in :code:`for x in iterable`)\: + +.. code-block:: python + + iterable = data_reader() + +Each element produced from the iterable should be a **single** entry of data, +**not** a mini batch. That entry of data could be a single item, or a tuple of +items. +Each item should be of `supported type `_ (e.g., numpy 1d +array of float32, int, list of int) + +An example implementation for single item data reader creator: + +.. code-block:: python + + def reader_creator_random_image(width, height): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader + +An example implementation for multiple item data reader creator: + +.. code-block:: python + + def reader_creator_random_image_and_label(width, height, label): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height), label + return reader + + +TODO(yuyang18): Should we add whole design doc here? +""" + +import decorator +from decorator import * + +import creator + +__all__ = decorator.__all__ + ['creator'] diff --git a/python/paddle/reader/creator.py b/python/paddle/reader/creator.py new file mode 100644 index 0000000000000000000000000000000000000000..4c905d959fad4e8c1a8826ce8dc60c5fa834514d --- /dev/null +++ b/python/paddle/reader/creator.py @@ -0,0 +1,85 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The creator package contains some simple reader creators, which can +be used in user programs. +""" + +__all__ = ['np_array', 'text_file', 'recordio'] + + +def np_array(x): + """ + Creates a reader that yields the elements of x if it is a + numpy vector, or the rows of x if it is a numpy matrix, + or any sub-hyperplane indexed by the highest dimension. + + :param x: the numpy array to create reader from. + :returns: data reader created from x. + """ + + def reader(): + if x.ndim < 1: + yield x + + for e in x: + yield e + + return reader + + +def text_file(path): + """ + Creates a data reader that outputs text line by line from the given text file. + Trailing new line ('\\\\n') of each line will be removed. + + :path: path of the text file. + :returns: data reader of text file + """ + + def reader(): + f = open(path, "r") + for l in f: + yield l.rstrip('\n') + f.close() + + return reader + + +def recordio(paths, buf_size=100): + """ + Creates a data reader from the given RecordIO file paths separated by ","; + glob patterns are supported. + :path: path of recordio files, can be a string or a string list. + :returns: data reader of recordio files. 
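A short usage sketch tying these creators to the reader contract documented above (consistent with the creator tests later in this patch; the text-file path is hypothetical):

import numpy as np
import paddle.reader.creator

row_reader = paddle.reader.creator.np_array(np.array([[1, 2], [3, 4]]))
for sample in row_reader():
    print(sample)              # one row per iteration: [1 2], then [3 4]

line_reader = paddle.reader.creator.text_file("data.txt")  # hypothetical file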
+ """ + + import recordio as rec + import paddle.reader.decorator as dec + import cPickle as pickle + + def reader(): + if isinstance(paths, basestring): + path = paths + else: + path = ",".join(paths) + f = rec.reader(path) + while True: + r = f.read() + if r is None: + break + yield pickle.loads(r) + f.close() + + return dec.buffered(reader, buf_size) diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..44a6e344630bb35d28ee29078bf8727053a24bef --- /dev/null +++ b/python/paddle/reader/decorator.py @@ -0,0 +1,405 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'map_readers', 'buffered', 'compose', 'chain', 'shuffle', + 'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader' +] + +from threading import Thread +import subprocess + +from Queue import Queue +import itertools +import random +import zlib + + +def map_readers(func, *readers): + """ + Creates a data reader that outputs the return value of a function, using + the output of each data reader as its arguments. + + :param func: function to use. The type of func should be (Sample) => Sample + :type: callable + :param readers: readers whose outputs will be used as arguments of func. + :return: the created data reader. + :rtype: callable + """ + + def reader(): + rs = [] + for r in readers: + rs.append(r()) + for e in itertools.imap(func, *rs): + yield e + + return reader + + +def shuffle(reader, buf_size): + """ + Creates a data reader whose data output is shuffled. + + Output from the iterator created by the original reader will be + buffered into a shuffle buffer and then shuffled. The size of the shuffle + buffer is determined by argument buf_size. + + :param reader: the original reader whose output will be shuffled. + :type reader: callable + :param buf_size: shuffle buffer size. + :type buf_size: int + + :return: the new reader whose output is shuffled. + :rtype: callable + """ + + def data_reader(): + buf = [] + for e in reader(): + buf.append(e) + if len(buf) >= buf_size: + random.shuffle(buf) + for b in buf: + yield b + buf = [] + + if len(buf) > 0: + random.shuffle(buf) + for b in buf: + yield b + + return data_reader + + +def chain(*readers): + """ + Creates a data reader whose output is the outputs of the input data + readers chained together. + + If the input readers output the following data entries: + [0, 0, 0] + [1, 1, 1] + [2, 2, 2] + The chained reader will output: + [0, 0, 0, 1, 1, 1, 2, 2, 2] + + :param readers: input readers. + :return: the new data reader. + :rtype: callable + """ + + def reader(): + rs = [] + for r in readers: + rs.append(r()) + + for e in itertools.chain(*rs): + yield e + + return reader + + +class ComposeNotAligned(ValueError): + pass + + +def compose(*readers, **kwargs): + """ + Creates a data reader whose output is the combination of input readers. 
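Note that shuffle() is bucket-local: it accumulates buf_size samples, shuffles that bucket, drains it, and repeats. A buffer at least as large as the dataset therefore gives a full shuffle, while a smaller one only reorders locally; the decorator test later in this patch relies on the degenerate case:

import paddle.reader

tiny = lambda: iter(range(10))
s = paddle.reader.shuffle(tiny, buf_size=1)
assert list(s()) == list(range(10))   # a one-slot buffer can never reorder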
+ + If the input readers output the following data entries: + (1, 2) 3 (4, 5) + The composed reader will output: + (1, 2, 3, 4, 5) + + :param readers: readers that will be composed together. + :param check_alignment: if True, will check if input readers are aligned + correctly. If False, will not check alignment and trailing outputs + will be discarded. Defaults to True. + :type check_alignment: bool + + :return: the new data reader. + + :raises ComposeNotAligned: outputs of readers are not aligned. + Will not raise when check_alignment is set to False. + """ + check_alignment = kwargs.pop('check_alignment', True) + + def make_tuple(x): + if isinstance(x, tuple): + return x + else: + return (x, ) + + def reader(): + rs = [] + for r in readers: + rs.append(r()) + if not check_alignment: + for outputs in itertools.izip(*rs): + yield sum(map(make_tuple, outputs), ()) + else: + for outputs in itertools.izip_longest(*rs): + for o in outputs: + if o is None: + # None will not be present if compose is aligned + raise ComposeNotAligned( + "outputs of readers are not aligned.") + yield sum(map(make_tuple, outputs), ()) + + return reader + + +def buffered(reader, size): + """ + Creates a buffered data reader. + + The buffered data reader will read and save data entries into a + buffer. Reading from the buffered data reader will proceed as long + as the buffer is not empty. + + :param reader: the data reader to read from. + :type reader: callable + :param size: max buffer size. + :type size: int + + :returns: the buffered data reader. + """ + + class EndSignal(): + pass + + end = EndSignal() + + def read_worker(r, q): + for d in r: + q.put(d) + q.put(end) + + def data_reader(): + r = reader() + q = Queue(maxsize=size) + t = Thread( + target=read_worker, args=( + r, + q, )) + t.daemon = True + t.start() + e = q.get() + while e != end: + yield e + e = q.get() + + return data_reader + + +def firstn(reader, n): + """ + Limit the max number of samples that the reader could return. + + :param reader: the data reader to read from. + :type reader: callable + :param n: the max number of samples to return. + :type n: int + :return: the decorated reader. + :rtype: callable + """ + + # TODO(yuyang18): Check if just drop the reader, could clean the opened + # resource or not? + + def firstn_reader(): + for i, item in enumerate(reader()): + if i == n: + break + yield item + + return firstn_reader + + +class XmapEndSignal(): + pass + + +def xmap_readers(mapper, reader, process_num, buffer_size, order=False): + """ + Use multiple worker threads to map samples from the reader with a + user-defined mapper, buffering up to buffer_size mapped samples. + :param mapper: a function to map a sample. 
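compose() pairs readers element-wise, so mismatched lengths are surfaced rather than silently dropped: with the default check_alignment=True, izip_longest pads the shorter reader with None and that None triggers ComposeNotAligned, while check_alignment=False falls back to izip, which truncates at the shortest reader. A small sketch:

import paddle.reader

def nums(n):
    def reader():
        for i in range(n):
            yield i
    return reader

loose = paddle.reader.compose(nums(5), nums(3), check_alignment=False)
assert list(loose()) == [(0, 0), (1, 1), (2, 2)]   # silently truncated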
+ :type mapper: callable + :param reader: the data reader to read from + :type reader: callable + :param process_num: number of worker threads handling the original samples + :type process_num: int + :param buffer_size: max buffer size + :type buffer_size: int + :param order: keep the order of reader + :type order: bool + :return: the decorated reader + :rtype: callable + """ + end = XmapEndSignal() + + # define a worker to read samples from reader to in_queue + def read_worker(reader, in_queue): + for i in reader(): + in_queue.put(i) + in_queue.put(end) + + # define a worker to read samples from reader to in_queue with order flag + def order_read_worker(reader, in_queue): + in_order = 0 + for i in reader(): + in_queue.put((in_order, i)) + in_order += 1 + in_queue.put(end) + + # define a worker to handle samples from in_queue by mapper + # and put mapped samples into out_queue + def handle_worker(in_queue, out_queue, mapper): + sample = in_queue.get() + while not isinstance(sample, XmapEndSignal): + r = mapper(sample) + out_queue.put(r) + sample = in_queue.get() + in_queue.put(end) + out_queue.put(end) + + # define a worker to handle samples from in_queue by mapper + # and put mapped samples into out_queue by order + def order_handle_worker(in_queue, out_queue, mapper, out_order): + ins = in_queue.get() + while not isinstance(ins, XmapEndSignal): + order, sample = ins + r = mapper(sample) + while order != out_order[0]: + pass + out_queue.put(r) + out_order[0] += 1 + ins = in_queue.get() + in_queue.put(end) + out_queue.put(end) + + def xreader(): + in_queue = Queue(buffer_size) + out_queue = Queue(buffer_size) + out_order = [0] + # start a read worker in a thread + target = order_read_worker if order else read_worker + t = Thread(target=target, args=(reader, in_queue)) + t.daemon = True + t.start() + # start several handle_workers + target = order_handle_worker if order else handle_worker + args = (in_queue, out_queue, mapper, out_order) if order else ( + in_queue, out_queue, mapper) + workers = [] + for i in xrange(process_num): + worker = Thread(target=target, args=args) + worker.daemon = True + workers.append(worker) + for w in workers: + w.start() + + sample = out_queue.get() + while not isinstance(sample, XmapEndSignal): + yield sample + sample = out_queue.get() + finish = 1 + while finish < process_num: + sample = out_queue.get() + if isinstance(sample, XmapEndSignal): + finish += 1 + else: + yield sample + + return xreader + + +def _buf2lines(buf, line_break="\n"): + # FIXME: line_break should be automatically configured. + lines = buf.split(line_break) + return lines[:-1], lines[-1] + + +class PipeReader: + """ + PipeReader reads data streamed from a command: it takes the command's + stdout into a pipe buffer, redirects it to the parser to + parse, and then yields data in your desired format. + + You can use a standard Linux command or call another program + to read data, e.g. from HDFS, Ceph, a URL, AWS S3, etc: + + .. code-block:: python + cmd = "hadoop fs -cat /path/to/some/file" + cmd = "cat sample_file.tar.gz" + cmd = "curl http://someurl" + cmd = "python print_s3_bucket.py" + + An example: + + .. 
code-block:: python + + def example_reader(): + for f in myfiles: + pr = PipeReader("cat %s"%f) + for l in pr.get_line(): + sample = l.split(" ") + yield sample + """ + + def __init__(self, command, bufsize=8192, file_type="plain"): + if not isinstance(command, str): + raise TypeError("command must be a string") + if file_type == "gzip": + self.dec = zlib.decompressobj( + 32 + zlib.MAX_WBITS) # offset 32 to skip the header + self.file_type = file_type + self.bufsize = bufsize + self.process = subprocess.Popen( + command.split(" "), bufsize=bufsize, stdout=subprocess.PIPE) + + def get_line(self, cut_lines=True, line_break="\n"): + """ + :param cut_lines: cut buffer to lines + :type cut_lines: bool + :param line_break: line break of the file, like \n or \r + :type line_break: string + + :return: one line or a buffer of bytes + :rtype: string + """ + remained = "" + while True: + buff = self.process.stdout.read(self.bufsize) + if buff: + if self.file_type == "gzip": + decomp_buff = self.dec.decompress(buff) + elif self.file_type == "plain": + decomp_buff = buff + else: + raise TypeError("file_type %s is not allowed" % + self.file_type) + + if cut_lines: + lines, remained = _buf2lines(''.join( + [remained, decomp_buff]), line_break) + for line in lines: + yield line + else: + yield decomp_buff + else: + break diff --git a/python/paddle/reader/tests/CMakeLists.txt b/python/paddle/reader/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..107d5912e1567e0c8721987a281272c7feb51e63 --- /dev/null +++ b/python/paddle/reader/tests/CMakeLists.txt @@ -0,0 +1,2 @@ +py_test(creator_test SRCS creator_test.py) +py_test(decorator_test SRCS decorator_test.py) diff --git a/python/paddle/reader/tests/__init__.py b/python/paddle/reader/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eca2dce114b069bf9b455d77ce670d73b5047fd2 --- /dev/null +++ b/python/paddle/reader/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/python/paddle/reader/tests/creator_test.py b/python/paddle/reader/tests/creator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c4238c12a74759d52eb09f31ce1126cc93dd3489 --- /dev/null +++ b/python/paddle/reader/tests/creator_test.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + # Copyright PaddlePaddle contributors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import unittest +import numpy as np +import paddle.reader.creator + + +class TestNumpyArray(unittest.TestCase): + def test_numpy_array(self): + l = [[1, 2, 3], [4, 5, 6]] + x = np.array(l, np.int32) + reader = paddle.reader.creator.np_array(x) + for idx, e in enumerate(reader()): + self.assertItemsEqual(e, l[idx]) + + +class TestTextFile(unittest.TestCase): + def test_text_file(self): + path = os.path.join(os.path.dirname(__file__), "test_data_creator.txt") + reader = paddle.reader.creator.text_file(path) + for idx, e in enumerate(reader()): + self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1)) + + +class TestRecordIO(unittest.TestCase): + def do_test(self, path): + reader = paddle.reader.creator.recordio(path) + idx = 0 + for e in reader(): + if idx == 0: + self.assertEqual(e, (1, 2, 3)) + elif idx == 1: + self.assertEqual(e, (4, 5, 6)) + idx += 1 + self.assertEqual(idx, 2) + + def test_recordIO(self): + self.do_test( + os.path.join( + os.path.dirname(__file__), "test_reader_recordio.dat")) + self.do_test([ + os.path.join( + os.path.dirname(__file__), "test_reader_recordio.dat") + ]) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/reader/tests/decorator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bee24d3b6579db5e99ec66931df201fdf9e1af07 --- /dev/null +++ b/python/paddle/reader/tests/decorator_test.py @@ -0,0 +1,178 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import unittest + +import paddle.reader + + +def reader_creator_10(dur): + def reader(): + for i in range(10): + # this sleep helps test paddle.reader.buffered + time.sleep(dur) + yield i + + return reader + + +class TestMap(unittest.TestCase): + def test_map(self): + d = {"h": 0, "i": 1} + + def tokenize(x): + return d[x] + + def read(): + yield "h" + yield "i" + + r = paddle.reader.map_readers(tokenize, read) + for i, e in enumerate(r()): + self.assertEqual(e, i) + + +class TestBuffered(unittest.TestCase): + def test_read(self): + for size in range(20): + b = paddle.reader.buffered(reader_creator_10(0), size) + c = 0 + for i in b(): + self.assertEqual(i, c) + c += 1 + self.assertEqual(c, 10) + + def test_buffering(self): + # reads have a 30 ms delay. 
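The timing assertions that follow lean on buffered() running its producer on a background thread: sleeping 0.3 s after the first sample gives that thread time to fill the 10-slot queue, so every later read should be served from the buffer in well under the 30 ms the raw reader needs per sample.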
+ b = paddle.reader.buffered(reader_creator_10(0.03), 10) + last_time = time.time() + for idx, i in enumerate(b()): + elapsed_time = time.time() - last_time + if i == 0: + time.sleep(0.3) + else: + # read time should be short, meaning already buffered. + self.assertLess(elapsed_time, 0.05) + last_time = time.time() + + +class TestCompose(unittest.TestCase): + def test_compose(self): + reader = paddle.reader.compose( + reader_creator_10(0), reader_creator_10(0)) + for idx, e in enumerate(reader()): + self.assertEqual(e, (idx, idx)) + + def test_compose_not_aligned(self): + total = 0 + reader = paddle.reader.compose( + paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)), + reader_creator_10(0)) + with self.assertRaises(paddle.reader.ComposeNotAligned): + for e in reader(): + total += 1 + # expecting 10, not 20 + self.assertEqual(total, 10) + + def test_compose_not_aligned_no_check(self): + total = 0 + reader = paddle.reader.compose( + paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)), + reader_creator_10(0), + check_alignment=False) + for e in reader(): + total += 1 + # expecting 10, not 20 + self.assertEqual(total, 10) + + +class TestChain(unittest.TestCase): + def test_chain(self): + c = paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)) + idx = 0 + for e in c(): + self.assertEqual(e, idx % 10) + idx += 1 + self.assertEqual(idx, 20) + + +class TestShuffle(unittest.TestCase): + def test_shuffle(self): + case = [(0, True), (1, True), (10, False), (100, False)] + a = reader_creator_10(0) + for size, checkEq in case: + s = paddle.reader.shuffle(a, size) + total = 0 + for idx, e in enumerate(s()): + if checkEq: + self.assertEqual(idx, e) + total += 1 + self.assertEqual(total, 10) + + +class TestXmap(unittest.TestCase): + def test_xmap(self): + def mapper(x): + return (x + 1) + + orders = (True, False) + thread_nums = (1, 2, 4, 8, 16) + buffered_size = (1, 2, 4, 8, 16) + for order in orders: + for tNum in thread_nums: + for size in buffered_size: + reader = paddle.reader.xmap_readers(mapper, + reader_creator_10(0), + tNum, size, order) + for n in xrange(3): + result = [] + for i in reader(): + result.append(i) + if not order: + result.sort() + for idx, e in enumerate(result): + self.assertEqual(e, mapper(idx)) + + +class TestPipeReader(unittest.TestCase): + def test_pipe_reader(self): + def example_reader(myfiles): + for f in myfiles: + pr = paddle.reader.PipeReader("cat %s" % f, bufsize=128) + for l in pr.get_line(): + yield l + + import tempfile + + records = [str(i) for i in xrange(5)] + temp = tempfile.NamedTemporaryFile() + try: + with open(temp.name, 'w') as f: + for r in records: + f.write('%s\n' % r) + + result = [] + for r in example_reader([temp.name]): + result.append(r) + + for idx, e in enumerate(records): + self.assertEqual(e, result[idx]) + finally: + # delete the temporary file + temp.close() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/reader/tests/test_data_creator.txt b/python/paddle/reader/tests/test_data_creator.txt new file mode 100644 index 0000000000000000000000000000000000000000..a2a8d47d43868d369083808497697da79e620e31 --- /dev/null +++ b/python/paddle/reader/tests/test_data_creator.txt @@ -0,0 +1,3 @@ +0 1 +2 3 +4 5 diff --git a/python/paddle/reader/tests/test_reader_recordio.dat b/python/paddle/reader/tests/test_reader_recordio.dat new file mode 100644 index 0000000000000000000000000000000000000000..a99a35bb829e066c4845d0b85b96cd1eb3a12491 Binary files /dev/null and 
b/python/paddle/reader/tests/test_reader_recordio.dat differ diff --git a/python/paddle/reader/tests/test_recordio_creator.dat b/python/paddle/reader/tests/test_recordio_creator.dat new file mode 100644 index 0000000000000000000000000000000000000000..17aa89b6796184407e83246d3f342a55a66b4a69 Binary files /dev/null and b/python/paddle/reader/tests/test_recordio_creator.dat differ diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 186b91c226accbe1c2d5465d6244b9438eec9979..460eb3b3491a0626eb6ecbf89132e24177a2adaa 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -471,6 +471,7 @@ class Input(Cfg): maxout=None, spp=None, pad=None, + upsample=None, format=None, nnz=None, is_static=None, @@ -983,6 +984,13 @@ class Pad(Cfg): self.add_keys(locals()) +@config_class +class Upsample(Cfg): + def __init__(self, scale, scale_y, pad_out_x, pad_out_y, upsample_size, + upsample_size_y): + self.add_keys(locals()) + + @config_class class Norm(Cfg): def __init__(self, @@ -2380,6 +2388,46 @@ class SpatialPyramidPoolLayer(LayerBase): self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels) +@config_layer('upsample') +class UpsampleLayer(LayerBase): + def __init__(self, name, inputs, **xargs): + super(UpsampleLayer, self).__init__( + name, 'upsample', 0, inputs=inputs, **xargs) + + input_layer = self.get_input_layer(0) + image_conf = self.config.inputs[0].upsample_conf.image_conf + image_conf.img_size = input_layer.width + image_conf.img_size_y = input_layer.height + image_conf.channels = input_layer.size / (input_layer.width * + input_layer.height) + + upsample = self.inputs[0].upsample + output_x = 0 + output_y = 0 + output_size = 0 + + if upsample.scale: + self.config.inputs[0].upsample_conf.scale = upsample.scale + self.config.inputs[0].upsample_conf.scale_y = upsample.scale_y + output_x = input_layer.width * upsample.scale + output_y = input_layer.height * upsample.scale_y + self.config.inputs[0].upsample_conf.pad_out_x = upsample.pad_out_x + self.config.inputs[0].upsample_conf.pad_out_y = upsample.pad_out_y + if upsample.upsample_size: + self.config.inputs[ + 0].upsample_conf.upsample_size = upsample.upsample_size + self.config.inputs[ + 0].upsample_conf.upsample_size_y = upsample.upsample_size_y + output_x = upsample.upsample_size + output_y = upsample.upsample_size_y + + output_size = image_conf.channels * output_x * output_y + + self.set_layer_height_width(output_y, output_x) + self.set_layer_depth(input_layer.depth) + self.set_layer_size(output_size) + + @config_layer('pad') class PadLayer(LayerBase): def __init__(self, name, inputs, **xargs): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 3684d1e8f73a21d9c6f2a5985f8b40ed6984057b..ebc31b23e0f5504b4bebccabe996b054c7fbce3b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -148,6 +148,7 @@ __all__ = [ 'resize_layer', 'sub_seq_layer', 'scale_sub_region_layer', + 'upsample_layer', 'factorization_machine', ] @@ -166,6 +167,7 @@ class LayerType(object): SEQUENCE_RESHAPE = 'seqreshape' POOLING_MAX = 'max' POOLING_AVG = 'average' + UPSAMPLE_LAYER = 'upsample' FC_LAYER = 'fc' COST = 'cost' COSINE_SIM_VEC = 'cos_vm' @@ -3014,6 +3016,83 @@ def img_pool3d_layer(input, size=l.config.size) +@wrap_name_default("upsample") +@layer_support() +def upsample_layer(input, + name=None, + scale=None, + scale_y=None, + upsample_size=None, + 
upsample_size_y=None,
+                   pad_out_x=False,
+                   pad_out_y=False,
+                   layer_attr=None):
+    """
+    The de-pooling (unpooling) process, the reverse of max pooling.
+    Inputs should be a list of length 2. The first input is a layer,
+    and the second input should be the MaxWithMaskPoolingLayer.
+
+    The example usage is:
+
+    .. code-block:: python
+
+       pool1 = paddle.v2.layer.img_pool(input=input, pool_size=2, stride=2,
+                                        pool_type=paddle.pooling.MaxWithMask())
+       upsample = paddle.v2.layer.upsample(input=[layer1, pool1])
+
+    :param name: The name of this layer. It is optional.
+    :type name: basestring
+    :param input: contains an input layer and a MaxWithMaskPoolingLayer.
+    :type input: list | tuple | collections.Sequence
+    :param scale: outputSize = scale * inputSize.
+    :type scale: int | None
+    :param scale_y: the scale of the y dimension. It will be set equal to
+                    scale if its value is None.
+    :type scale_y: int | None
+    :param upsample_size: specify the output size of the x dimension.
+    :type upsample_size: int | list | tuple
+    :param upsample_size_y: specify the output size of the y dimension.
+    :type upsample_size_y: int
+    :param pad_out_x: pad the output to the exact x dimension size. This
+                      parameter only works when scale is 2.
+    :type pad_out_x: bool
+    :param pad_out_y: pad the output to the exact y dimension size. This
+                      parameter only works when scale is 2.
+    :type pad_out_y: bool
+    :param layer_attr: Extra Layer Attribute.
+    :type layer_attr: ExtraLayerAttribute
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+
+    assert (scale is not None) or (upsample_size is not None), \
+        'either scale or upsample_size must be specified'
+
+    assert len(input) == 2, 'layer input size must be 2'
+
+    assert input[1].layer_type == LayerType.POOL_LAYER, \
+        'the second input should be the MaxPoolWithMaskLayer'
+
+    scale_y = scale \
+        if scale is not None else scale_y
+    upsample_size_y = upsample_size \
+        if upsample_size is not None else upsample_size_y
+
+    layer_type = LayerType.UPSAMPLE_LAYER
+
+    layer = Layer(
+        name=name,
+        type=layer_type,
+        inputs=[
+            Input(
+                input[0].name,
+                upsample=Upsample(scale, scale_y, pad_out_x, pad_out_y,
+                                  upsample_size, upsample_size_y)),
+            Input(input[1].name)
+        ],
+        **ExtraLayerAttribute.to_kwargs(layer_attr))
+
+    sz = layer.config.size
+
+    return LayerOutput(name, layer_type=layer_type, parents=input, size=sz)
+
+
 @wrap_name_default("spp")
 @layer_support()
 def spp_layer(input,
diff --git a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
index 580aef935b5cec385a88fb0b4f5b9a5ddeddb40c..30e0b9906c406d846d4b086a1a1c89587394afea 100644
--- a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
+++ b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
@@ -1,17 +1,17 @@
 #################### test_config_parser #########################
 add_test(NAME layers_test
-    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
+    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
         ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/layers_test.py
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)
 add_test(NAME test_reset_hook
-    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
+    COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
         ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)
 add_paddle_exe(protobuf_equal ProtobufEqualMain.cpp)
add_test(NAME test_layerHelpers - COMMAND - ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} + COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/ + ${PADDLE_BINARY_DIR}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal ) diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 8a318879630cd491573afcaf798dda2ca75e335d..44a75a60cc78e85f85d111a911999b7812db0f49 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -2,7 +2,6 @@ set -e cd `dirname $0` -export PYTHONPATH=$PWD/../../../../ protostr=$PWD/protostr . file_list.sh diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index c1acbecd9c313b02d6d33d2d04fd33fc1a8b026e..38056fe0a9496bcb5de76634bbab267e324dc2a4 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -36,7 +36,7 @@ __all__ = [ 'cifar', 'movielens', 'conll05', - 'sentiment' + 'sentiment', 'uci_housing', 'wmt14', 'wmt16', diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 52f5b947fdec55eea45b9d34eddd576c981fa97c..14b64742fd09bf6c197c5d1aa2354271293df239 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -15,7 +15,7 @@ import numpy import collections import topology -import minibatch +import paddle import cPickle __all__ = ['infer', 'Inference'] @@ -80,7 +80,7 @@ class Inference(object): for each_sample in input: yield each_sample - reader = minibatch.batch(__reader_impl__, batch_size=batch_size) + reader = paddle.batch(__reader_impl__, batch_size=batch_size) self.__gradient_machine__.start() for data_batch in reader(): diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 6a2bb8d337b7667aa2b1e3ef0815bb80f6e38d6a..a188a03eb3698c972de92c9807f1bdb71a249330 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -20,7 +20,7 @@ The primary usage shows below. .. code-block:: python - import paddle.v2 as paddle + import paddle img = paddle.layer.data(name='img', type=paddle.data_type.dense_vector(784)) hidden = paddle.layer.fc(input=img, size=200) diff --git a/python/paddle/v2/reader/__init__.py b/python/paddle/v2/reader/__init__.py index 3b059735a924d58714cd88a761eb83143f1192d6..12efdc4a0fec83fed57bdcbf687aaec69d13ba91 100644 --- a/python/paddle/v2/reader/__init__.py +++ b/python/paddle/v2/reader/__init__.py @@ -50,7 +50,7 @@ An example implementation for single item data reader creator: def reader(): while True: yield numpy.random.uniform(-1, 1, size=width*height) - return reader + return reader An example implementation for multiple item data reader creator: @@ -60,7 +60,7 @@ An example implementation for multiple item data reader creator: def reader(): while True: yield numpy.random.uniform(-1, 1, size=width*height), label - return reader + return reader TODO(yuyang18): Should we add whole design doc here? 
diff --git a/python/setup.py.in b/python/setup.py.in
index 4cb5409524457b7bc5a99c88a0dbbfc8834923fa..a811b509a90b8b0d84451f54462a0308c062d022 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -58,25 +58,27 @@ def mkl():
                 'istaged': ISTAGED,
                 'with_mkl': '@WITH_MKL@'})
-write_version_py(filename='@PADDLE_SOURCE_DIR@/python/paddle/version.py')
+write_version_py(filename='@PADDLE_BINARY_DIR@/python/paddle/version.py')
 
 packages=['paddle',
           'paddle.utils',
+          'paddle.dataset',
+          'paddle.reader',
           'paddle.fluid',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.layers']
 
-if '${WITH_FLUID}'== 'OFF':
+if '${WITH_FLUID_ONLY}'== 'OFF':
     packages+=['paddle.proto',
                'paddle.trainer',
                'paddle.trainer_config_helpers',
               'paddle.v2',
-               'paddle.v2.dataset',
-               'paddle.v2.reader',
                'paddle.v2.master',
                'paddle.v2.plot',
+               'paddle.v2.reader',
+               'paddle.v2.dataset',
                'py_paddle']
 
 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
@@ -87,7 +89,7 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
 # the prefix is sys.prefix which should always be usr
 paddle_bins = ''
-if '${WITH_FLUID}'== 'OFF':
+if '${WITH_FLUID_ONLY}'== 'OFF':
     paddle_bin_dir = 'opt/paddle/bin'
     paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
                    '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
@@ -95,19 +97,20 @@ if '${WITH_FLUID}'== 'OFF':
                    '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
 
 package_data={'paddle.fluid': ['core.so']}
-if '${WITH_FLUID}'== 'OFF':
+if '${WITH_FLUID_ONLY}'== 'OFF':
     package_data['paddle.v2.master']=['libpaddle_master.so']
     package_data['py_paddle']=['*.py','_swig_paddle.so']
 
 package_dir={
-    '': '${CMAKE_CURRENT_SOURCE_DIR}',
+    '': '${PADDLE_BINARY_DIR}/python',
     # The paddle.fluid.proto will be generated while compiling.
     # So that package points to other directory.
     'paddle.fluid.proto.profiler': '${PADDLE_BINARY_DIR}/paddle/fluid/platform',
     'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework',
+    'paddle.fluid': '${PADDLE_BINARY_DIR}/python/paddle/fluid',
 }
-if '${WITH_FLUID}'== 'OFF':
-    package_dir['py_paddle']='${PADDLE_SOURCE_DIR}/paddle/py_paddle'
+if '${WITH_FLUID_ONLY}'== 'OFF':
+    package_dir['py_paddle']='${PADDLE_BINARY_DIR}/python/py_paddle'
 
 paddle_rt_lib_dir = 'lib'
diff --git a/tools/aws_benchmarking/README.md b/tools/aws_benchmarking/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..837fcbb8512bce027ecd09a7f39b806151e9154b
--- /dev/null
+++ b/tools/aws_benchmarking/README.md
@@ -0,0 +1,160 @@
+# AWS benchmark testing tool
+This is an automation tool for deploying PaddlePaddle benchmark tests to AWS.
+
+## Features
+
+ - Subnet creation to fit just the number of EC2 instances required.
+ - Pserver and trainer EC2 instance allocation, and instance state verification.
+ - nvidia-docker ready for GPU training.
+ - Garbage collection of instances and network elements when a task is accomplished or an error occurs.
+ - Test logs are collected in real time.
+ - Web service for checking logs or tearing down the testing setup.
+ - No testing code change needed.
+ - Many optional configuration options.
+
+## Usage
+
+### Prerequisites
+
+ - You have a working AWS account.
+ - You have the [AWS Command Line Interface](https://aws.amazon.com/cli/) installed.
+ - Your AWS CLI is bound to an account which has the `AmazonEC2FullAccess` permission, and it is set as the default credential.
+ - You have a key pair created and the pem file downloaded.
+ - You have a default VPC in the region you want to run the test.
+ - You have a Security Group created for the VPC mentioned above, which allows port 22 and the port on which you want to expose your control web service (5436 by default).
+ - If your test is supposed to run on a GPU machine, especially a multi-card GPU machine (p2, p3 series), you might need to contact Amazon to raise the default limit, which allows no more than 1 GPU instance at a time.
+
+### Start a benchmark test
+
+#### Create training image
+
+*What to expect in this step:*
+
+*You will have your training logic packed together with the Paddle runtime in a docker image, which can then be picked up by the AWS instances for training.*
+
+The training Python script and the PaddlePaddle runtime are supposed to be packed into one docker image. Use a PaddlePaddle production image as the base image and create the training image with a Dockerfile as follows:
+
+```Dockerfile
+FROM paddlepaddle/paddle:latest-gpu
+
+ENV HOME /root
+COPY ./ /root/
+WORKDIR /root
+RUN pip install -r /root/requirements.txt
+ENTRYPOINT ["python", "my_training.py"]
+```
+
+***Please Note***
+Training nodes will run your `ENTRYPOINT` script with the following environment variables (a minimal sketch of how a script might consume them appears at the end of this section):
+
+ - `TASK_NAME`: unique name to identify this training process.
+ - `TRAINING_ROLE`: current node's role in this training process, either "PSERVER" or "TRAINER".
+ - `PSERVER_HOSTS`: comma-separated list of pserver endpoints, e.g. "192.168.1.2:5436,192.168.1.3:5436".
+ - `PSERVERS`: same as above.
+ - `TRAINERS`: trainer count.
+ - `SERVER_ENDPOINT`: current server endpoint if the node role is a pserver.
+ - `TRAINER_INDEX`: an integer identifying the index of the current trainer if the node role is a trainer.
+ - `PADDLE_INIT_TRAINER_ID`: same as above.
+
+ Once you have a working distributed training script that takes advantage of these environment variables, build the training image by running the following command:
+
+ ```bash
+ docker build -t myreponame/paddle_benchmark .
+ ```
+
+ Now you have the image built and tagged with `myreponame/paddle_benchmark`; let's push it to dockerhub so that it can be picked up by our AWS instances.
+
+ ```bash
+ docker push myreponame/paddle_benchmark
+ ```
+
+#### Create instances and start training
+
+*What to expect in this step:*
+
+*You will be asked to provide some basic settings to configure your training, and this tool will start and monitor the training for you.*
+
+Now let's start the training process:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws -v :/root/.pem \
+putcn/paddle_aws_client \
+--action create \
+--key_name \
+--security_group_id \
+--docker_image myreponame/paddle_benchmark \
+--pserver_count 2 \
+--trainer_count 2
+```
+
+Now just wait until you see this:
+```
+master server finished init process, visit http://XXX:XXX/status to check master log
+```
+That means you can turn off your laptop: your cluster is creating instances, starting the training process, collecting logs, and it will eventually shut all pservers and trainers down when training is finished.
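+
+As promised above, here is a minimal sketch of how an `ENTRYPOINT` script might consume the environment variables this tool injects. It only prints what it would do; treat it as an illustration rather than part of the tool:
+
+```python
+import os
+
+# These variables are injected by this tool (see the list above).
+role = os.environ["TRAINING_ROLE"]  # "PSERVER" or "TRAINER"
+pserver_hosts = os.environ["PSERVER_HOSTS"].split(",")
+trainer_count = int(os.environ["TRAINERS"])
+
+if role == "PSERVER":
+    # each pserver listens on its own endpoint
+    endpoint = os.environ["SERVER_ENDPOINT"]
+    print("starting pserver on %s, peers: %s" % (endpoint, pserver_hosts))
+else:
+    # each trainer knows its index within the job
+    trainer_id = int(os.environ["TRAINER_INDEX"])
+    print("starting trainer %d of %d" % (trainer_id, trainer_count))
+```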
+
+#### Post creation operations
+
+To access the master log:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws \
+putcn/paddle_aws_client \
+--action status \
+--master_server_public_ip \
+--master_server_port
+```
+
+To tear down the training setup:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws \
+putcn/paddle_aws_client \
+--action cleanup \
+--master_server_public_ip \
+--master_server_port
+```
+
+To retrieve training logs
+TBD
+
+### Tech details
+
+*What to expect in this step:*
+
+*You will understand what is happening behind the scenes: how to check the training log, how to tear down the training on the fly, etc.*
+
+Let's understand what is happening under the hood when you run the above command on your laptop:
+
+![alt](diagram.png)
+
+There are 4 roles in the figure above:
+ - client: your laptop.
+ - master: asks the AWS API server to create/tear down instances, and monitors the training process.
+ - AWS API server: the service that actually creates and manages instances.
+ - pservers and trainers: the training instances.
+
+When you run the `docker run` command above, what it actually does is ask the AWS API service to create a subnet (step 1) and a master instance (step 2), and pass along all the parameters the client collected or generated (step 3). The master is kept at a minimal hardware config to keep the running cost low.
+
+Once the master is up and running, it asks the AWS API server to create the heavy-lifting training instances, which are expensive to run (step 4). The master then starts the training process as soon as they are done initializing (step 5).
+
+Meanwhile, the master exposes a web service so the client can check the training log or even tear the training setup down with a web service call.
+
+If you are creating the training with the client docker container, and also monitoring your AWS dashboard, you will initially see an instance tagged with `ROLE=MASTER` and `TASK_NAME=_master` start; then you will see several instances tagged with `ROLE=PSERVER` and `ROLE=TRAINER` start.
+When the training is finished, pservers and trainers will be terminated. All their logs are kept in the master node's docker env.
+
+Master exposes 4 major services:
+
+ - GET `/status`: return master log
+ - GET `/logs`: return list of log file names
+ - GET `/log/`: return a particular log by log file name
+ - POST `/cleanup`: tear down the whole setup
+
+
+### Parameters
+
+TBD, please refer to client/cluster_launcher.py for now.
+
+### Troubleshooting
+
+TBD
diff --git a/tools/aws_benchmarking/client/Dockerfile b/tools/aws_benchmarking/client/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..812c5d4bce0adff404577ce6b5fd3f0f4a91118c
--- /dev/null
+++ b/tools/aws_benchmarking/client/Dockerfile
@@ -0,0 +1,7 @@
+FROM python:2.7.14-stretch
+
+ENV HOME /root
+COPY ./ /root/
+WORKDIR /root
+RUN pip install -r /root/requirements.txt
+ENTRYPOINT ["python", "cluster_launcher.py"]
\ No newline at end of file
diff --git a/tools/aws_benchmarking/client/cluster_launcher.py b/tools/aws_benchmarking/client/cluster_launcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..594378ff8fc0744a4b11b1c11e2e3b270be7aed0
--- /dev/null
+++ b/tools/aws_benchmarking/client/cluster_launcher.py
@@ -0,0 +1,407 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import time +import math +import logging +import copy + +import netaddr +import boto3 +import namesgenerator +import paramiko +from scp import SCPClient +import requests + + +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--key_name', type=str, default="", help="required, key pair name") +parser.add_argument( + '--security_group_id', + type=str, + default="", + help="required, the security group id associated with your VPC") + +parser.add_argument( + '--vpc_id', + type=str, + default="", + help="The VPC in which you wish to run test") +parser.add_argument( + '--subnet_id', + type=str, + default="", + help="The Subnet_id in which you wish to run test") + +parser.add_argument( + '--pserver_instance_type', + type=str, + default="c5.2xlarge", + help="your pserver instance type, c5.2xlarge by default") +parser.add_argument( + '--trainer_instance_type', + type=str, + default="p2.8xlarge", + help="your trainer instance type, p2.8xlarge by default") + +parser.add_argument( + '--task_name', + type=str, + default="", + help="the name you want to identify your job") +parser.add_argument( + '--pserver_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, \ + use ami-1ae93962 for us-east-2") + +parser.add_argument( + '--pserver_command', type=str, default="", help="pserver start command") + +parser.add_argument( + '--trainer_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, \ + use ami-1ae93962 for us-west-2") + +parser.add_argument( + '--trainer_command', type=str, default="", help="trainer start command") + +parser.add_argument( + '--availability_zone', + type=str, + default="us-east-2a", + help="aws zone id to place ec2 instances") + +parser.add_argument( + '--trainer_count', type=int, default=1, help="Trainer count") + +parser.add_argument( + '--pserver_count', type=int, default=1, help="Pserver count") + +parser.add_argument( + '--action', type=str, default="create", help="create|cleanup|status") + +parser.add_argument('--pem_path', type=str, help="private key file") + +parser.add_argument( + '--pserver_port', type=str, default="5436", help="pserver port") + +parser.add_argument( + '--docker_image', type=str, default="busybox", help="training docker image") + +parser.add_argument( + '--master_server_port', type=int, default=5436, help="master server port") + +parser.add_argument( + '--master_server_public_ip', type=str, help="master server public ip") + +parser.add_argument( + '--master_docker_image', + type=str, + default="putcn/paddle_aws_master:latest", + help="master docker image id") + +parser.add_argument( + '--no_clean_up', + type=str2bool, + default=False, + help="whether to clean up after training") + +args = parser.parse_args() + 
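+
+# Example invocation (illustrative only; the values below are placeholders,
+# see the README in this directory for the dockerized equivalent):
+#   python cluster_launcher.py --action create \
+#       --key_name my_key --security_group_id sg-0123456789 \
+#       --docker_image myreponame/paddle_benchmark \
+#       --pserver_count 2 --trainer_count 2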
+logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
+
+ec2client = boto3.client('ec2')
+
+
+def print_arguments():
+    print('----------- Configuration Arguments -----------')
+    for arg, value in sorted(vars(args).iteritems()):
+        print('%s: %s' % (arg, value))
+    print('------------------------------------------------')
+
+
+def create_subnet():
+    # if no vpc id provided, list vpcs
+    logging.info("start creating subnet")
+    if not args.vpc_id:
+        logging.info("no vpc provided, trying to find the default one")
+        vpcs_desc = ec2client.describe_vpcs(
+            Filters=[{
+                "Name": "isDefault",
+                "Values": ["true", ]
+            }], )
+        if len(vpcs_desc["Vpcs"]) == 0:
+            raise ValueError('No default VPC')
+        args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
+        vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
+
+        logging.info("default vpc found with id %s and CidrBlock %s" %
+                     (args.vpc_id, vpc_cidrBlock))
+
+    if not vpc_cidrBlock:
+        logging.info("trying to find cidrblock for vpc")
+        vpcs_desc = ec2client.describe_vpcs(
+            Filters=[{
+                "Name": "vpc-id",
+                "Values": [args.vpc_id, ],
+            }], )
+        if len(vpcs_desc["Vpcs"]) == 0:
+            raise ValueError('No VPC found')
+        vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
+        logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
+
+    # list subnets in vpc in order to create a new one
+
+    logging.info("trying to find ip blocks for new subnet")
+    subnets_desc = ec2client.describe_subnets(
+        Filters=[{
+            "Name": "vpc-id",
+            "Values": [args.vpc_id, ],
+        }], )
+
+    ips_taken = []
+    for subnet_dec in subnets_desc["Subnets"]:
+        ips_taken.append(subnet_dec["CidrBlock"])
+
+    ip_blocks_available = netaddr.IPSet(
+        [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
+    # adding 10 addresses as buffer
+    cidr_prefix = 32 - math.ceil(
+        math.log(args.pserver_count + args.trainer_count + 10, 2))
+    if cidr_prefix <= 16:
+        raise ValueError('Too many nodes to fit in current VPC')
+
+    # initialize to None so the check below raises a clear ValueError
+    # instead of a NameError when no block fits
+    subnet_cidr = None
+    for ipnetwork in ip_blocks_available.iter_cidrs():
+        try:
+            subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
+            logging.info("subnet ip block found %s" % (subnet_cidr))
+            break
+        except Exception:
+            pass
+
+    if not subnet_cidr:
+        raise ValueError(
+            'No available subnet to fit required nodes in current VPC')
+
+    logging.info("trying to create subnet")
+    subnet_desc = ec2client.create_subnet(
+        CidrBlock=str(subnet_cidr),
+        VpcId=args.vpc_id,
+        AvailabilityZone=args.availability_zone)
+
+    subnet_id = subnet_desc["Subnet"]["SubnetId"]
+
+    subnet_waiter = ec2client.get_waiter('subnet_available')
+    # sleep for 1s before checking its state
+    time.sleep(1)
+    subnet_waiter.wait(SubnetIds=[subnet_id, ])
+
+    logging.info("subnet created")
+
+    logging.info("adding tags to newly created subnet")
+    ec2client.create_tags(
+        Resources=[subnet_id, ],
+        Tags=[{
+            "Key": "Task_name",
+            'Value': args.task_name
+        }])
+    return subnet_id
+
+
+def run_instances(image_id, instance_type, count=1, role="MASTER", cmd=""):
+    response = ec2client.run_instances(
+        ImageId=image_id,
+        InstanceType=instance_type,
+        MaxCount=count,
+        MinCount=count,
+        UserData=cmd,
+        DryRun=False,
+        InstanceInitiatedShutdownBehavior="stop",
+        KeyName=args.key_name,
+        Placement={'AvailabilityZone': args.availability_zone},
+        NetworkInterfaces=[{
+            'DeviceIndex': 0,
+            'SubnetId': args.subnet_id,
+            "AssociatePublicIpAddress": True,
+            'Groups': args.security_group_ids
+        }],
+        TagSpecifications=[{
+            'ResourceType': "instance",
+            'Tags': [{
+                "Key": 'Task_name',
+                "Value": args.task_name + "_master"
+            }, {
+                "Key": 'Role',
+                "Value": role
+            }]
+        }])
+
instance_ids = [] + for instance in response["Instances"]: + instance_ids.append(instance["InstanceId"]) + + if len(instance_ids) > 0: + logging.info(str(len(instance_ids)) + " instance(s) created") + else: + logging.info("no instance created") + #create waiter to make sure it's running + + logging.info("waiting for instance to become accessible") + waiter = ec2client.get_waiter('instance_status_ok') + waiter.wait( + Filters=[{ + "Name": "instance-status.status", + "Values": ["ok"] + }, { + "Name": "instance-status.reachability", + "Values": ["passed"] + }, { + "Name": "instance-state-name", + "Values": ["running"] + }], + InstanceIds=instance_ids) + + instances_response = ec2client.describe_instances(InstanceIds=instance_ids) + + return instances_response["Reservations"][0]["Instances"] + + +def generate_task_name(): + return namesgenerator.get_random_name() + + +def init_args(): + + if not args.task_name: + args.task_name = generate_task_name() + logging.info("task name generated %s" % (args.task_name)) + + if not args.pem_path: + args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem" + if args.security_group_id: + args.security_group_ids = (args.security_group_id, ) + + +def create(): + + init_args() + + # create subnet + if not args.subnet_id: + args.subnet_id = create_subnet() + + # create master node + + master_instance_response = run_instances( + image_id="ami-7a05351f", instance_type="t2.nano") + + logging.info("master server started") + + args.master_server_public_ip = master_instance_response[0][ + "PublicIpAddress"] + args.master_server_ip = master_instance_response[0]["PrivateIpAddress"] + + logging.info("master server started, master_ip=%s, task_name=%s" % + (args.master_server_public_ip, args.task_name)) + + # cp config file and pems to master node + + ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_client.connect( + hostname=args.master_server_public_ip, username="ubuntu", pkey=ssh_key) + + with SCPClient(ssh_client.get_transport()) as scp: + scp.put(os.path.expanduser("~") + "/" + ".aws", + recursive=True, + remote_path='/home/ubuntu/') + scp.put(args.pem_path, + remote_path='/home/ubuntu/' + args.key_name + ".pem") + + logging.info("credentials and pem copied to master") + + # set arguments and start docker + kick_off_cmd = "docker run -d -v /home/ubuntu/.aws:/root/.aws/" + kick_off_cmd += " -v /home/ubuntu/" + args.key_name + ".pem:/root/" + args.key_name + ".pem" + kick_off_cmd += " -v /home/ubuntu/logs/:/root/logs/" + kick_off_cmd += " -p " + str(args.master_server_port) + ":" + str( + args.master_server_port) + kick_off_cmd += " " + args.master_docker_image + + args_to_pass = copy.copy(args) + args_to_pass.action = "serve" + del args_to_pass.pem_path + del args_to_pass.security_group_ids + del args_to_pass.master_docker_image + del args_to_pass.master_server_public_ip + for arg, value in sorted(vars(args_to_pass).iteritems()): + if value: + kick_off_cmd += ' --%s %s' % (arg, value) + + logging.info(kick_off_cmd) + stdin, stdout, stderr = ssh_client.exec_command(command=kick_off_cmd) + return_code = stdout.channel.recv_exit_status() + logging.info(return_code) + if return_code != 0: + raise Exception("Error while kicking off master") + + logging.info( + "master server finished init process, visit %s to check master log" % + (get_master_web_url("/status"))) + + +def cleanup(): + print 
requests.post(get_master_web_url("/cleanup")).text + + +def status(): + print requests.post(get_master_web_url("/status")).text + + +def get_master_web_url(path): + return "http://" + args.master_server_public_ip + ":" + str( + args.master_server_port) + path + + +if __name__ == "__main__": + print_arguments() + if args.action == "create": + if not args.key_name or not args.security_group_id: + raise ValueError("key_name and security_group_id are required") + create() + elif args.action == "cleanup": + if not args.master_server_public_ip: + raise ValueError("master_server_public_ip is required") + cleanup() + elif args.action == "status": + if not args.master_server_public_ip: + raise ValueError("master_server_public_ip is required") + status() diff --git a/tools/aws_benchmarking/client/requirements.txt b/tools/aws_benchmarking/client/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9454801f2025671cfd1a2c3b71cf4c2ac07cb8fb --- /dev/null +++ b/tools/aws_benchmarking/client/requirements.txt @@ -0,0 +1,6 @@ +netaddr==0.7.19 +boto3==1.6.21 +namesgenerator==0.3 +paramiko==2.4.1 +scp +requests diff --git a/tools/aws_benchmarking/diagram.png b/tools/aws_benchmarking/diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..b97909c5fe78b59d0e636ff73c2ed3e63a0be722 Binary files /dev/null and b/tools/aws_benchmarking/diagram.png differ diff --git a/tools/aws_benchmarking/server/Dockerfile b/tools/aws_benchmarking/server/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..333523abcdb6fbe7dc01bbaf7d32ce1d8e866028 --- /dev/null +++ b/tools/aws_benchmarking/server/Dockerfile @@ -0,0 +1,7 @@ +FROM python:2.7.14-stretch + +ENV HOME /root +COPY ./ /root/ +WORKDIR /root +RUN pip install -r /root/requirements.txt +ENTRYPOINT ["python", "cluster_master.py"] \ No newline at end of file diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py new file mode 100644 index 0000000000000000000000000000000000000000..21f85a5fc43e951897eb6b785367630abda722c0 --- /dev/null +++ b/tools/aws_benchmarking/server/cluster_master.py @@ -0,0 +1,673 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
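+
+# This master script provisions pservers and trainers on EC2 and serves a
+# small HTTP control interface (see start_server below for the handlers):
+#   GET  /status    - return the master log
+#   GET  /logs      - return the list of log file names
+#   GET  /log/...   - return a particular log by file name
+#   POST /save_data - append posted data to <task_name>.txt
+#   POST /cleanup   - tear down the whole setup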
+ +import argparse +import os +import json +import math +import time +import threading +import logging + +import netaddr +import boto3 +import namesgenerator +import paramiko + +from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer + + +# You must have aws_access_key_id, aws_secret_access_key, region set in +# ~/.aws/credentials and ~/.aws/config +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--key_name', type=str, default="", help="required, key pair name") +parser.add_argument( + '--security_group_id', + type=str, + default="", + help="required, the security group id associated with your VPC") + +parser.add_argument( + '--vpc_id', + type=str, + default="", + help="The VPC in which you wish to run test") +parser.add_argument( + '--subnet_id', + type=str, + default="", + help="The Subnet_id in which you wish to run test") + +parser.add_argument( + '--pserver_instance_type', + type=str, + default="c5.2xlarge", + help="your pserver instance type, c5.2xlarge by default") +parser.add_argument( + '--trainer_instance_type', + type=str, + default="p2.8xlarge", + help="your trainer instance type, p2.8xlarge by default") + +parser.add_argument( + '--task_name', + type=str, + default="", + help="the name you want to identify your job") +parser.add_argument( + '--pserver_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-east-2" +) +parser.add_argument( + '--trainer_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-west-2" +) + +parser.add_argument( + '--availability_zone', + type=str, + default="us-east-2a", + help="aws zone id to place ec2 instances") + +parser.add_argument( + '--trainer_count', type=int, default=1, help="Trainer count") + +parser.add_argument( + '--pserver_count', type=int, default=1, help="Pserver count") + +parser.add_argument( + '--pserver_bash_file', + type=str, + default=os.path.join(os.path.dirname(__file__), "pserver.sh.template"), + help="pserver bash file path") + +parser.add_argument( + '--pserver_command', type=str, default="", help="pserver start command") + +parser.add_argument( + '--trainer_bash_file', + type=str, + default=os.path.join(os.path.dirname(__file__), "trainer.sh.template"), + help="trainer bash file path") + +parser.add_argument( + '--trainer_command', type=str, default="", help="trainer start command") + +parser.add_argument( + '--action', type=str, default="serve", help="create|cleanup|serve") + +parser.add_argument('--pem_path', type=str, help="private key file") + +parser.add_argument( + '--pserver_port', type=str, default="5436", help="pserver port") + +parser.add_argument( + '--docker_image', type=str, default="busybox", help="training docker image") + +parser.add_argument( + '--master_server_port', type=int, default=5436, help="master server port") + +parser.add_argument( + '--master_server_ip', type=str, default="", help="master server private ip") + +parser.add_argument( + '--no_clean_up', + type=str2bool, + default=False, + help="whether to clean up after training") + +args = parser.parse_args() + +ec2client = boto3.client('ec2') + +args.log_path = os.path.join(os.path.dirname(__file__), "logs/") + 
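+# args.log_path points at logs/ next to this script; the empty
+# server/logs/master.log shipped with this tool keeps that directory in git
+# so the logging call below can open its file.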
+logging.basicConfig(
+    filename=args.log_path + 'master.log',
+    level=logging.INFO,
+    format='%(asctime)s %(message)s')
+
+log_files = ["master.log"]
+
+
+def create_subnet():
+    # if no vpc id provided, list vpcs
+    logging.info("start creating subnet")
+    if not args.vpc_id:
+        logging.info("no vpc provided, trying to find the default one")
+        vpcs_desc = ec2client.describe_vpcs(
+            Filters=[{
+                "Name": "isDefault",
+                "Values": ["true", ]
+            }], )
+        if len(vpcs_desc["Vpcs"]) == 0:
+            raise ValueError('No default VPC')
+        args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
+        vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
+
+        logging.info("default vpc found with id %s and CidrBlock %s" %
+                     (args.vpc_id, vpc_cidrBlock))
+
+    if not vpc_cidrBlock:
+        logging.info("trying to find cidrblock for vpc")
+        vpcs_desc = ec2client.describe_vpcs(
+            Filters=[{
+                "Name": "vpc-id",
+                "Values": [args.vpc_id, ],
+            }], )
+        if len(vpcs_desc["Vpcs"]) == 0:
+            raise ValueError('No VPC found')
+        vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
+        logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
+
+    # list subnets in vpc in order to create a new one
+
+    logging.info("trying to find ip blocks for new subnet")
+    subnets_desc = ec2client.describe_subnets(
+        Filters=[{
+            "Name": "vpc-id",
+            "Values": [args.vpc_id, ],
+        }], )
+
+    ips_taken = []
+    for subnet_dec in subnets_desc["Subnets"]:
+        ips_taken.append(subnet_dec["CidrBlock"])
+
+    ip_blocks_available = netaddr.IPSet(
+        [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
+    # adding 10 addresses as buffer
+    cidr_prefix = 32 - math.ceil(
+        math.log(args.pserver_count + args.trainer_count + 10, 2))
+    if cidr_prefix <= 16:
+        raise ValueError('Too many nodes to fit in current VPC')
+
+    # initialize to None so the check below raises a clear ValueError
+    # instead of a NameError when no block fits
+    subnet_cidr = None
+    for ipnetwork in ip_blocks_available.iter_cidrs():
+        try:
+            subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
+            logging.info("subnet ip block found %s" % (subnet_cidr))
+            break
+        except Exception:
+            pass
+
+    if not subnet_cidr:
+        raise ValueError(
+            'No available subnet to fit required nodes in current VPC')
+
+    logging.info("trying to create subnet")
+    subnet_desc = ec2client.create_subnet(
+        CidrBlock=str(subnet_cidr),
+        VpcId=args.vpc_id,
+        AvailabilityZone=args.availability_zone)
+
+    subnet_id = subnet_desc["Subnet"]["SubnetId"]
+
+    subnet_waiter = ec2client.get_waiter('subnet_available')
+    # sleep for 1s before checking its state
+    time.sleep(1)
+    subnet_waiter.wait(SubnetIds=[subnet_id, ])
+
+    logging.info("subnet created")
+
+    logging.info("adding tags to newly created subnet")
+    ec2client.create_tags(
+        Resources=[subnet_id, ],
+        Tags=[{
+            "Key": "Task_name",
+            'Value': args.task_name
+        }])
+    return subnet_id
+
+
+def generate_task_name():
+    return namesgenerator.get_random_name()
+
+
+def script_to_str(file_path):
+    if not file_path:
+        return "echo $PSERVER_HOSTS"
+    file = open(file_path, 'r')
+    text = file.read().strip()
+    file.close()
+    return text
+
+
+def run_instances(image_id, instance_type, count, role, cmd=""):
+    response = ec2client.run_instances(
+        ImageId=image_id,
+        InstanceType=instance_type,
+        MaxCount=count,
+        MinCount=count,
+        UserData=cmd,
+        DryRun=False,
+        InstanceInitiatedShutdownBehavior="stop",
+        KeyName=args.key_name,
+        Placement={'AvailabilityZone': args.availability_zone},
+        NetworkInterfaces=[{
+            'DeviceIndex': 0,
+            'SubnetId': args.subnet_id,
+            "AssociatePublicIpAddress": True,
+            'Groups': args.security_group_ids
+        }],
+        TagSpecifications=[{
+            'ResourceType': "instance",
+            'Tags': [{
+                "Key": 'Task_name',
+                "Value": args.task_name
+            }, {
+                "Key": 'Role',
"Value": role + }] + }]) + + instance_ids = [] + for instance in response["Instances"]: + instance_ids.append(instance["InstanceId"]) + + if len(instance_ids) > 0: + logging.info(str(len(instance_ids)) + " instance(s) created") + else: + logging.info("no instance created") + #create waiter to make sure it's running + + logging.info("waiting for instance to become accessible") + waiter = ec2client.get_waiter('instance_status_ok') + waiter.wait( + Filters=[{ + "Name": "instance-status.status", + "Values": ["ok"] + }, { + "Name": "instance-status.reachability", + "Values": ["passed"] + }, { + "Name": "instance-state-name", + "Values": ["running"] + }], + InstanceIds=instance_ids) + + instances_response = ec2client.describe_instances(InstanceIds=instance_ids) + + return instances_response["Reservations"][0]["Instances"] + + +def create_pservers(): + try: + return run_instances( + image_id=args.pserver_image_id, + instance_type=args.pserver_instance_type, + count=args.pserver_count, + role="PSERVER", ) + except Exception: + logging.exception("error while trying to create pservers") + cleanup(args.task_name) + + +def log_to_file(source, filename): + if not filename in log_files: + log_files.append(filename) + with open(args.log_path + filename, "a") as log_file: + for line in iter(source.readline, ""): + log_file.write(line) + + +def create_trainers(kickoff_cmd, pserver_endpoints_str): + def create_and_start_trainer(trainer_index): + logging.info("trainer " + str(trainer_index) + " is starting") + + instance_response = run_instances( + image_id=args.trainer_image_id, + instance_type=args.trainer_instance_type, + count=1, + role="TRAINER", )[0] + trainer_ip = instance_response["PrivateIpAddress"] + + logging.info("trainer " + str(trainer_index) + " started") + + ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_client.connect(hostname=trainer_ip, username="ubuntu", pkey=ssh_key) + + logging.info("trainer " + str(trainer_index) + + " terminal connected via ssh") + + cmd = kickoff_cmd.format( + PSERVER_HOSTS=pserver_endpoints_str, + DOCKER_IMAGE=args.docker_image, + TRAINER_INDEX=str(trainer_index), + TASK_NAME=args.task_name, + TRAINER_COUNT=args.trainer_count, + COMMAND=args.trainer_command, + MASTER_ENDPOINT=args.master_server_ip + ":" + + str(args.master_server_port)) + logging.info(cmd) + + stdin, stdout, stderr = ssh_client.exec_command(command=cmd) + + # read and save output log + + logging.info("trainer " + str(trainer_index) + + " command executed, keep fetching log") + + stdout_thread = threading.Thread( + target=log_to_file, + args=( + stdout, + "trainer_" + str(trainer_index) + ".log", )) + stderr_thread = threading.Thread( + target=log_to_file, + args=( + stderr, + "trainer_" + str(trainer_index) + "_err.log", )) + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return_code = stdout.channel.recv_exit_status() + if return_code != 0: + trainer_create_results[trainer_index] = {'has_error': True} + raise ValueError("trainer didn't finish with exit code 0") + + ssh_client.close() + + # multi thread starting trainer instance and run kickoff command + + trainer_threads = [] + trainer_create_results = {} + try: + for i in xrange(args.trainer_count): + logging.info("starting tread for trainer " + str(i)) + trainer_thread = threading.Thread( + target=create_and_start_trainer, args=(i, )) + trainer_thread.start() + 
trainer_threads.append(trainer_thread)
+
+        for trainer_thread in trainer_threads:
+            trainer_thread.join()
+
+        # trainer_create_results maps trainer index to a result dict
+        for result in trainer_create_results.values():
+            if result["has_error"]:
+                logging.error(
+                    "error during trainer starting or training, destroying the whole cluster "
+                )
+                cleanup(args.task_name)
+                break
+
+        logging.info("all trainers stopped")
+    except Exception, e:
+        logging.info(
+            "Training exception, cleaning up resources, please check log for more info"
+        )
+    finally:
+        cleanup(args.task_name)
+
+
+def cleanup(task_name):
+    if args.no_clean_up:
+        logging.info("no clean up option set, going to leave the setup running")
+        return
+    # shut down all ec2 instances
+    print("going to clean up " + task_name + " instances")
+    instances_response = ec2client.describe_instances(Filters=[{
+        "Name": "tag:Task_name",
+        "Values": [task_name]
+    }])
+
+    instance_ids = []
+    if len(instances_response["Reservations"]) > 0:
+        for reservation in instances_response["Reservations"]:
+            for instance in reservation["Instances"]:
+                instance_ids.append(instance["InstanceId"])
+
+        ec2client.terminate_instances(InstanceIds=instance_ids)
+
+        instance_termination_waiter = ec2client.get_waiter(
+            'instance_terminated')
+        instance_termination_waiter.wait(InstanceIds=instance_ids)
+
+    # delete the subnet created
+
+    subnet = ec2client.describe_subnets(Filters=[{
+        "Name": "tag:Task_name",
+        "Values": [task_name]
+    }])
+
+    if len(subnet["Subnets"]) > 0:
+        ec2client.delete_subnet(SubnetId=subnet["Subnets"][0]["SubnetId"])
+    # no subnet delete waiter, just leave it.
+    logging.info("Cleanup done")
+    return
+
+
+def kickoff_pserver(host, pserver_endpoints_str):
+    try:
+        ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
+        ssh_client = paramiko.SSHClient()
+        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        ssh_client.connect(hostname=host, username="ubuntu", pkey=ssh_key)
+        cmd = (script_to_str(args.pserver_bash_file)).format(
+            PSERVER_HOSTS=pserver_endpoints_str,
+            DOCKER_IMAGE=args.docker_image,
+            PSERVER_PORT=args.pserver_port,
+            TASK_NAME=args.task_name,
+            COMMAND=args.pserver_command,
+            TRAINER_COUNT=args.trainer_count,
+            TRAINER_INDEX=0,
+            # there is no way to use 0.0.0.0:port to start the pserver;
+            # we have to run docker --network="host" with the host ip to make this work
+            SERVER_ENDPOINT=host + ":" + str(args.pserver_port),
+            MASTER_ENDPOINT=args.master_server_ip + ":" +
+            str(args.master_server_port))
+        logging.info(cmd)
+        stdin, stdout, stderr = ssh_client.exec_command(command=cmd)
+
+        stdout_thread = threading.Thread(
+            target=log_to_file, args=(
+                stdout,
+                "pserver_" + host + ".log", ))
+        stderr_thread = threading.Thread(
+            target=log_to_file, args=(
+                stderr,
+                "pserver_" + host + "_err.log", ))
+        stdout_thread.start()
+        stderr_thread.start()
+
+        stdout_thread.join()
+        stderr_thread.join()
+
+        return_code = stdout.channel.recv_exit_status()
+        logging.info(return_code)
+        if return_code != 0:
+            raise Exception("Error while kicking off pserver training process")
+    except Exception:
+        logging.exception("Error while kicking off pserver training process")
+        cleanup(args.task_name)
+    finally:
+        ssh_client.close()
+
+
+def init_args():
+
+    if not args.task_name:
+        args.task_name = generate_task_name()
+        logging.info("task name generated %s" % (args.task_name))
+
+    if not args.pem_path:
+        args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem"
+    if args.security_group_id:
+        args.security_group_ids = (args.security_group_id, )
+
+    args.trainers_job_done_count = 0
+
+
+def create_cluster():
+
if not args.subnet_id: + logging.info("creating subnet for this task") + args.subnet_id = create_subnet() + logging.info("subnet %s created" % (args.subnet_id)) + + logging.info("creating pservers") + pserver_create_response = create_pservers() + logging.info("pserver created, collecting pserver ips") + + pserver_endpoints = [] + for pserver in pserver_create_response: + pserver_endpoints.append(pserver["NetworkInterfaces"][0][ + "PrivateIpAddress"] + ":" + args.pserver_port) + + pserver_endpoints_str = ",".join(pserver_endpoints) + + logging.info("kicking off pserver training process") + pserver_threads = [] + for pserver in pserver_create_response: + pserver_thread = threading.Thread( + target=kickoff_pserver, + args=(pserver["PrivateIpAddress"], pserver_endpoints_str)) + pserver_thread.start() + pserver_threads.append(pserver_thread) + + logging.info("all pserver training process started") + + logging.info("creating trainers and kicking off trainer training process") + create_trainers( + kickoff_cmd=script_to_str(args.trainer_bash_file), + pserver_endpoints_str=pserver_endpoints_str) + + for pserver_thread in pserver_threads: + pserver_thread.join() + + logging.info("all process ended") + + +def start_server(args): + class S(BaseHTTPRequestHandler): + def _set_headers(self): + self.send_response(200) + self.send_header('Content-type', 'text/text') + self.end_headers() + + def do_HEAD(self): + self._set_headers() + + def do_404(self): + self.send_response(404) + self.send_header('Content-type', 'text/text') + self.end_headers() + logging.info("Received invalid GET request" + self.path) + self.wfile.write("NO ACTION FOUND") + + def do_GET(self): + + request_path = self.path + if request_path == "/status" or request_path == "/master_logs": + self._set_headers() + logging.info("Received request to return status") + with open(args.log_path + "master.log", "r") as logfile: + self.wfile.write(logfile.read().strip()) + elif request_path == "/list_logs" or request_path == "/logs": + self._set_headers() + self.wfile.write("\n".join(log_files)) + elif "/log/" in request_path: + self._set_headers() + log_file_path = request_path.replace("/log/", "") + logging.info("requesting log file path is" + args.log_path + + log_file_path) + with open(args.log_path + log_file_path, "r") as logfile: + self.wfile.write(logfile.read().strip()) + else: + self.do_404() + + def do_POST(self): + + request_path = self.path + + if request_path == "/save_data": + self._set_headers() + logging.info("Received request to save data") + self.wfile.write("DATA SAVED!") + content_length = int(self.headers['Content-Length']) + post_data = self.rfile.read(content_length) + if args.task_name: + with open(args.task_name + ".txt", "a") as text_file: + text_file.write(post_data + "\n") + + elif request_path == "/cleanup": + self._set_headers() + logging.info("Received request to cleanup cluster") + cleanup(args.task_name) + self.wfile.write("cleanup in progress") + + else: + self.do_404() + + server_address = ('', args.master_server_port) + httpd = HTTPServer(server_address, S) + logging.info("HTTP server is starting") + httpd.serve_forever() + + +def print_arguments(): + logging.info('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + logging.info('%s: %s' % (arg, value)) + logging.info('------------------------------------------------') + + +if __name__ == "__main__": + print_arguments() + if args.action == "create": + logging.info("going to create cluster") + if not 
args.key_name or not args.security_group_id: + raise ValueError("key_name and security_group_id are required") + init_args() + create_cluster() + elif args.action == "cleanup": + logging.info("going to cleanup cluster") + if not args.task_name: + raise ValueError("task_name is required") + cleanup(args.task_name) + elif args.action == "serve": + # serve mode + if not args.master_server_ip: + raise ValueError( + "No master server ip set, please run with --action create") + + logging.info("going to start serve and create cluster") + + init_args() + + logging.info("starting server in another thread") + server_thread = threading.Thread(target=start_server, args=(args, )) + server_thread.start() + + create_cluster() + server_thread.join() + elif args.action == "test": + start_server(args) diff --git a/tools/aws_benchmarking/server/logs/master.log b/tools/aws_benchmarking/server/logs/master.log new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tools/aws_benchmarking/server/pserver.sh.template b/tools/aws_benchmarking/server/pserver.sh.template new file mode 100644 index 0000000000000000000000000000000000000000..2612856d1e6273fe2642f82e8c616eb9ff24f8a4 --- /dev/null +++ b/tools/aws_benchmarking/server/pserver.sh.template @@ -0,0 +1,2 @@ +#!/bin/bash +docker run --network="host" -i -e "SERVER_ENDPOINT={SERVER_ENDPOINT}" -e "MASTER_ENDPOINT={MASTER_ENDPOINT}" -e "TASK_NAME={TASK_NAME}" -e "TRAINER_INDEX={TRAINER_INDEX}" -e "TRAINING_ROLE=PSERVER" -e "TRAINER_COUNT={TRAINER_COUNT}" -e "TRAINERS={TRAINER_COUNT}" -e "PSERVER_HOSTS={PSERVER_HOSTS}" -e "PSERVERS={PSERVER_HOSTS}" {DOCKER_IMAGE} {COMMAND} --device CPU \ No newline at end of file diff --git a/tools/aws_benchmarking/server/requirements.txt b/tools/aws_benchmarking/server/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5c523854f28b0a6f024fba2b2f344b53ba967a2f --- /dev/null +++ b/tools/aws_benchmarking/server/requirements.txt @@ -0,0 +1,4 @@ +netaddr==0.7.19 +boto3==1.6.21 +namesgenerator==0.3 +paramiko==2.4.1 diff --git a/tools/aws_benchmarking/server/trainer.sh.template b/tools/aws_benchmarking/server/trainer.sh.template new file mode 100644 index 0000000000000000000000000000000000000000..a4b2876b08cdf05e90e50589f897d74ca5f90443 --- /dev/null +++ b/tools/aws_benchmarking/server/trainer.sh.template @@ -0,0 +1,2 @@ +#!/bin/bash +nvidia-docker run --network="host" -i -e "MASTER_ENDPOINT={MASTER_ENDPOINT}" -e "TASK_NAME={TASK_NAME}" -e "TRAINER_COUNT={TRAINER_COUNT}" -e "TRAINERS={TRAINER_COUNT}" -e "TRAINER_INDEX={TRAINER_INDEX}" -e "PADDLE_INIT_TRAINER_ID={TRAINER_INDEX}" -e "TRAINING_ROLE=TRAINER" -e "PSERVER_HOSTS={PSERVER_HOSTS}" -e "PSERVERS={PSERVER_HOSTS}" {DOCKER_IMAGE} {COMMAND} --device GPU \ No newline at end of file diff --git a/tools/codestyle/cpplint_pre_commit.hook b/tools/codestyle/cpplint_pre_commit.hook new file mode 100755 index 0000000000000000000000000000000000000000..94d1e23ce716f7f1d723bad5f1f4c60030f19eb7 --- /dev/null +++ b/tools/codestyle/cpplint_pre_commit.hook @@ -0,0 +1,12 @@ +#!/bin/bash + +TOTAL_ERRORS=0 + +# The trick to remove deleted files: https://stackoverflow.com/a/2413151 +for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do + cpplint $file; + TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); +done + +exit $TOTAL_ERRORS +
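+
+# Manual usage (illustrative): stage your changes, then run
+#   bash tools/codestyle/cpplint_pre_commit.hook
+# The hook runs cpplint (which must be on PATH) over every staged,
+# non-deleted file and exits with the accumulated error count.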