diff --git a/Dockerfile b/Dockerfile index 4d6165b79a1d94b8f27d7f3ee1b6e2cee5992d31..752fea5951bdc8c2cf79a17c960217c88ae62571 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ apt-get install -y --allow-downgrades \ - git python-pip python-dev openssh-server bison \ + git python-pip python-dev python-opencv openssh-server bison \ libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ @@ -76,8 +76,7 @@ RUN easy_install -U pip && \ pip install sphinx-rtd-theme==0.1.9 recommonmark RUN pip install pre-commit 'ipython==5.3.0' && \ - pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ - pip install opencv-python + pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' #For docstring checker RUN pip install pylint pytest astroid isort diff --git a/benchmark/.gitignore b/benchmark/.gitignore index 7b66e8a5b5020fd847982db401665d24ba3a069c..fb4114356d4f37efc8ad672316fd4f99443d9fcd 100644 --- a/benchmark/.gitignore +++ b/benchmark/.gitignore @@ -7,3 +7,6 @@ paddle/rnn/imdb.pkl caffe/image/logs tensorflow/image/logs tensorflow/rnn/logs +fluid/models/*.pyc +fluid/logs +fluid/nohup.out diff --git a/benchmark/fluid/Dockerfile b/benchmark/fluid/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8298fcf95a5074bce9533e04d54dab79a1460286 --- /dev/null +++ b/benchmark/fluid/Dockerfile @@ -0,0 +1,22 @@ +FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04 +RUN apt-get update && apt-get install -y python python-pip iputils-ping libgtk2.0-dev wget vim net-tools iftop python-opencv +RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.7 /usr/lib/libcudnn.so && ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/lib/libnccl.so +RUN pip install -U pip +RUN pip install -U kubernetes paddlepaddle + +# IMPORTANT: +# Add "ENV http_proxy=http://ip:port" if your download is slow, and don't forget to unset it at runtime. + +RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.cifar.train10()\npaddle.dataset.flowers.fetch()" | python' +RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.mnist.train()\npaddle.dataset.mnist.test()\npaddle.dataset.imdb.fetch()" | python' +RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.imikolov.fetch()" | python' +RUN pip uninstall -y paddlepaddle && mkdir /workspace + +ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/paddle_k8s /usr/bin +ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/k8s_tools.py /root + +ADD *.whl / +RUN pip install /*.whl && rm -f /*.whl && chmod +x /usr/bin/paddle_k8s + +ENV LD_LIBRARY_PATH=/usr/local/lib +ADD fluid_benchmark.py dataset.py models/ /workspace/ diff --git a/benchmark/fluid/README.md b/benchmark/fluid/README.md index 7071e9fdcd394a5a4db4d0d599610a72d98c0a3c..1b0c7dce8bd6faab0c4c59caa1cbe337483cbd16 100644 --- a/benchmark/fluid/README.md +++ b/benchmark/fluid/README.md @@ -44,11 +44,25 @@ Currently supported `--model` argument include: ## Run Distributed Benchmark on Kubernetes Cluster +You may need to build a Docker image before submitting a cluster job onto Kubernetes, or you will +have to start all those processes manually on each node, which is not recommended.
+ +To build the Docker image, you need to choose a paddle "whl" package to run with, you may either +download it from +http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_en.html or +build it by your own. Once you've got the "whl" package, put it under the current directory and run: + +```bash +docker build -t [your docker image name]:[your docker image tag] . +``` + +Then push the image to a Docker registry that your Kubernetes cluster can reach. + We provide a script `kube_gen_job.py` to generate Kubernetes yaml files to submit distributed benchmark jobs to your cluster. To generate a job yaml, just run: ```bash -python kube_gen_job.py --jobname myjob --pscpu 4 --cpu 8 --gpu 8 --psmemory 20 --memory 40 --pservers 4 --trainers 4 --entry "python fluid_benchmark.py --model mnist --parallel 1 --device GPU --update_method pserver " --disttype pserver +python kube_gen_job.py --jobname myjob --pscpu 4 --cpu 8 --gpu 8 --psmemory 20 --memory 40 --pservers 4 --trainers 4 --entry "python fluid_benchmark.py --model mnist --gpus 8 --device GPU --update_method pserver " --disttype pserver ``` Then the yaml files are generated under directory `myjob`, you can run: diff --git a/benchmark/fluid/fluid_benchmark.py b/benchmark/fluid/fluid_benchmark.py index c1d458970a58bfac2a3369e8964eb100568b28f2..49f26255f315c3c368f42b367dfc6487ffa0deb5 100644 --- a/benchmark/fluid/fluid_benchmark.py +++ b/benchmark/fluid/fluid_benchmark.py @@ -40,10 +40,7 @@ def parse_args(): parser.add_argument( '--batch_size', type=int, default=32, help='The minibatch size.') parser.add_argument( - '--learning_rate', - type=float, - default=0.001, - help='The minibatch size.') + '--learning_rate', type=float, default=0.001, help='The learning rate.') # TODO(wuyi): add "--use_fake_data" option back. 
parser.add_argument( '--skip_batch_num', @@ -72,6 +69,11 @@ def parse_args(): type=int, default=1, help='If gpus > 1, will use ParallelExecutor to run, else use Executor.') + parser.add_argument( + '--cpus', + type=int, + default=1, + help='If cpus > 1, will use ParallelDo to run, else use Executor.') parser.add_argument( '--data_set', type=str, @@ -88,8 +90,8 @@ def parse_args(): help='If set, use nvprof for CUDA.') parser.add_argument( '--no_test', - action='store_false', - help='If set, test the testset during training.') + action='store_true', + help='If set, do not test the testset during training.') parser.add_argument( '--memory_optimize', action='store_true', @@ -231,13 +233,10 @@ def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc, train_losses.append(loss) print("Pass: %d, Iter: %d, Loss: %f\n" % (pass_id, iters, np.mean(train_losses))) - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sec\n' % - (num_samples, train_elapsed, examples_per_sec)) - print("Pass: %d, Loss: %f" % (pass_id, np.mean(train_losses))) + print_train_time(start_time, time.time(), num_samples) + print("Pass: %d, Loss: %f" % (pass_id, np.mean(train_losses))), # evaluation - if not args.no_test and batch_acc != None: + if not args.no_test and batch_acc: pass_test_acc = test(exe, infer_prog, test_reader, feeder, batch_acc) print(", Test Accuracy: %f" % pass_test_acc) @@ -315,11 +314,8 @@ def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader, if batch_id % 1 == 0: print("Pass %d, batch %d, loss %s" % (pass_id, batch_id, np.array(loss))) - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - if not args.no_test and batch_acc != None: + print_train_time(start_time, time.time(), num_samples) + if not args.no_test and batch_acc: test_acc = test(startup_exe, infer_prog, test_reader, feeder, batch_acc) print("Pass: %d, Test Accuracy: %f\n" % (pass_id, test_acc)) @@ -329,12 +325,19 @@ def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader, def print_arguments(args): vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and vars(args)['device'] == 'GPU') - print('----------- resnet Configuration Arguments -----------') + print('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): print('%s: %s' % (arg, value)) print('------------------------------------------------') +def print_train_time(start_time, end_time, num_samples): + train_elapsed = end_time - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + + def main(): args = parse_args() print_arguments(args) @@ -342,7 +345,7 @@ def main(): # the unique trainer id, starting from 0, needed by trainer # only nccl_id_var, num_trainers, trainer_id = ( - None, 1, int(os.getenv("PADDLE_TRAINER_ID", "-1"))) + None, 1, int(os.getenv("PADDLE_TRAINER_ID", "0"))) if args.use_cprof: pr = cProfile.Profile() diff --git a/benchmark/fluid/models/mnist.py b/benchmark/fluid/models/mnist.py index d264bfc12bdb159c06dae81db4949b9ee17268e2..28a38a931cf6cfcd5dd858b363b3d29b70368315 100644 --- a/benchmark/fluid/models/mnist.py +++ b/benchmark/fluid/models/mnist.py @@ -69,15 +69,30 @@ def 
get_model(args): images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) label = fluid.layers.data(name='label', shape=[1], dtype='int64') - # Train program - predict = cnn_model(images) - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - - # Evaluator - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size_tensor) + if args.device == 'CPU' and args.cpus > 1: + places = fluid.layers.get_places(args.cpus) + pd = fluid.layers.ParallelDo(places) + with pd.do(): + predict = cnn_model(pd.read_input(images)) + label = pd.read_input(label) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + pd.write_output(avg_cost) + pd.write_output(batch_acc) + + avg_cost, batch_acc = pd() + avg_cost = fluid.layers.mean(avg_cost) + batch_acc = fluid.layers.mean(batch_acc) + else: + # Train program + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_acc = fluid.layers.accuracy(input=predict, label=label) # inference program inference_program = fluid.default_main_program().clone() diff --git a/benchmark/fluid/models/resnet.py b/benchmark/fluid/models/resnet.py index 9dec8911ed64e09285fb461c4a12adb601535316..f951f73a35dc4dc6f796178ebbc3e2886b2d7d8c 100644 --- a/benchmark/fluid/models/resnet.py +++ b/benchmark/fluid/models/resnet.py @@ -132,18 +132,33 @@ def get_model(args): input = fluid.layers.data(name='data', shape=dshape, dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') - predict = model(input, class_dim) - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size_tensor) + if args.device == 'CPU' and args.cpus > 1: + places = fluid.layers.get_places(args.cpus) + pd = fluid.layers.ParallelDo(places) + with pd.do(): + predict = model(pd.read_input(input), class_dim) + label = pd.read_input(label) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + pd.write_output(avg_cost) + pd.write_output(batch_acc) + + avg_cost, batch_acc = pd() + avg_cost = fluid.layers.mean(avg_cost) + batch_acc = fluid.layers.mean(batch_acc) + else: + predict = model(input, class_dim) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) inference_program = fluid.default_main_program().clone() with fluid.program_guard(inference_program): inference_program = fluid.io.get_inference_program( - target_vars=[batch_acc, batch_size_tensor]) + target_vars=[batch_acc]) optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) diff --git a/benchmark/fluid/models/stacked_dynamic_lstm.py b/benchmark/fluid/models/stacked_dynamic_lstm.py index 81a28b5f3aed0c325398b909d700c23df545824a..1b680d76a8ba1ead7c8c50065e1817c45b951b27 100644 --- a/benchmark/fluid/models/stacked_dynamic_lstm.py +++ b/benchmark/fluid/models/stacked_dynamic_lstm.py @@ -101,9 +101,8 @@ def get_model(args): loss = fluid.layers.mean(x=loss) # 
add acc - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') batch_acc = fluid.layers.accuracy(input=logit, label=fluid.layers.data(name='label', \ - shape=[1], dtype='int64'), total=batch_size_tensor) + shape=[1], dtype='int64')) inference_program = fluid.default_main_program().clone() with fluid.program_guard(inference_program): diff --git a/benchmark/fluid/run.sh b/benchmark/fluid/run.sh index f6dfd20bf2ee0b668b6d4238d4511253b2233035..5d9b2db87135e53470b106dcd11a6bcfdc5dbda9 100644 --- a/benchmark/fluid/run.sh +++ b/benchmark/fluid/run.sh @@ -2,6 +2,7 @@ # This script benchmarking the PaddlePaddle Fluid on # single thread single GPU. +mkdir -p logs #export FLAGS_fraction_of_gpu_memory_to_use=0.0 export CUDNN_PATH=/paddle/cudnn_v5 @@ -35,71 +36,74 @@ nohup stdbuf -oL nvidia-smi \ --format=csv \ --filename=mem.log \ -l 1 & + # mnist # mnist gpu mnist 128 -FLAGS_benchmark=true stdbuf -oL python fluid/mnist.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=mnist \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=500 \ - 2>&1 | tee -a mnist_gpu_128.log + 2>&1 | tee -a logs/mnist_gpu_128.log # vgg16 # gpu cifar10 128 -FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=vgg16 \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a vgg16_gpu_128.log + 2>&1 | tee -a logs/vgg16_gpu_128.log # flowers gpu 128 -FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=vgg16 \ --device=GPU \ --batch_size=32 \ --data_set=flowers \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a vgg16_gpu_flowers_32.log + 2>&1 | tee -a logs/vgg16_gpu_flowers_32.log # resnet50 # resnet50 gpu cifar10 128 -FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=resnet \ --device=GPU \ --batch_size=128 \ --data_set=cifar10 \ - --model=resnet_cifar10 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a resnet50_gpu_128.log + 2>&1 | tee -a logs/resnet50_gpu_128.log # resnet50 gpu flowers 64 -FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=resnet \ --device=GPU \ --batch_size=64 \ --data_set=flowers \ - --model=resnet_imagenet \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a resnet50_gpu_flowers_64.log + 2>&1 | tee -a logs/resnet50_gpu_flowers_64.log # lstm # lstm gpu imdb 32 # tensorflow only support batch=32 -FLAGS_benchmark=true stdbuf -oL python fluid/stacked_dynamic_lstm.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=stacked_dynamic_lstm \ --device=GPU \ --batch_size=32 \ --skip_batch_num=5 \ --iterations=30 \ - --hidden_dim=512 \ - --emb_dim=512 \ - --crop_size=1500 \ - 2>&1 | tee -a lstm_gpu_32.log + 2>&1 | tee -a logs/lstm_gpu_32.log # seq2seq # seq2seq gpu wmb 128 -FLAGS_benchmark=true stdbuf -oL python fluid/machine_translation.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=machine_translation \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a lstm_gpu_128.log + 2>&1 | tee -a logs/lstm_gpu_128.log diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index 9459f1ddfe85f5607880d3fdd968b494d6af592a..ffdf91a354bd92bdaf3f88344f0a9256638b568c 100644 --- a/cmake/external/grpc.cmake +++ 
b/cmake/external/grpc.cmake @@ -33,10 +33,19 @@ ELSE() SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j ${NUM_OF_PROCESSOR} static grpc_cpp_plugin) ENDIF() +# FIXME(wuyi): do not build zlib cares protobuf twice, find a way to build grpc with them ExternalProject_Add( extern_grpc DEPENDS protobuf zlib - URL "http://paddlepaddledeps.bj.bcebos.com/grpc.tar.xz" + # NOTE(wuyi): + # this package is generated by following steps: + # 1. git clone -b v1.8.x https://github.com/grpc/grpc.git + # 2. submodule update --init + # 3. keep only zlib, cares, protobuf, boringssl under "third_party", + # checkout and clean other dirs under third_party + # 4. remove .git, and package the directory. + URL "http://paddlepaddledeps.bj.bcebos.com/grpc-v1.8.x.tar.gz" + URL_MD5 "c9c58ee7d0e8929a63155af6a2ecdbd0" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" @@ -49,7 +58,6 @@ ExternalProject_Add( INSTALL_COMMAND make prefix=${GRPC_INSTALL_DIR} install ) -# FIXME(typhoonzero): hack to get static lib path, try a better way like merge them. ADD_LIBRARY(grpc++_unsecure STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET grpc++_unsecure PROPERTY IMPORTED_LOCATION "${GRPC_INSTALL_DIR}/lib/libgrpc++_unsecure.a") diff --git a/doc/fluid/api/io.rst b/doc/fluid/api/io.rst index dd9d88b669957c22cd0a07fa4b7e219e2d6e5d61..3e956f8302d261b52f9f76ff8eb4a01f9c6381f8 100644 --- a/doc/fluid/api/io.rst +++ b/doc/fluid/api/io.rst @@ -59,3 +59,21 @@ get_inference_program .. autofunction:: paddle.fluid.io.get_inference_program :noindex: +save_checkpoint +--------------- + +.. autofunction:: paddle.fluid.io.save_checkpoint + :noindex: + +load_checkpoint +--------------- + +.. autofunction:: paddle.fluid.io.load_checkpoint + :noindex: + +clean_checkpoint +---------------- + +.. autofunction:: paddle.fluid.io.clean_checkpoint + :noindex: + diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index f53da4d194f8d2428b4121fa1bb31f3fc95a9f64..f78e6db3268e44d5f30d83508f07c4ed68106e48 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -181,6 +181,12 @@ Print .. autofunction:: paddle.fluid.layers.Print :noindex: +is_empty +-------- + +.. autofunction:: paddle.fluid.layers.is_empty + :noindex: + device ====== @@ -255,6 +261,19 @@ double_buffer .. autofunction:: paddle.fluid.layers.double_buffer :noindex: +random_data_generator +--------------------- + +.. autofunction:: paddle.fluid.layers.random_data_generator + :noindex: + +Preprocessor +------------ + +.. autoclass:: paddle.fluid.layers.Preprocessor + :members: + :noindex: + nn == @@ -594,6 +613,29 @@ roi_pool .. autofunction:: paddle.fluid.layers.roi_pool :noindex: +dice_loss +--------- + +.. autofunction:: paddle.fluid.layers.dice_loss + :noindex: + +resize_bilinear +--------------- + +.. autofunction:: paddle.fluid.layers.resize_bilinear + :noindex: + +gather +------ + +.. autofunction:: paddle.fluid.layers.gather + :noindex: + +random_crop +----------- + +.. autofunction:: paddle.fluid.layers.random_crop + :noindex: ops === @@ -742,6 +784,12 @@ sum .. autofunction:: paddle.fluid.layers.sum :noindex: +shape +----- + +.. autofunction:: paddle.fluid.layers.shape + :noindex: + sigmoid ------- @@ -991,21 +1039,3 @@ zeros .. autofunction:: paddle.fluid.layers.zeros :noindex: -topk ----- - -.. autofunction:: paddle.fluid.layers.topk - :noindex: - -dice_loss ----- - -.. autofunction:: paddle.fluid.layers.dice_loss - :noindex: - -upsampling_bilinear2d -____ - -.. 
autofunction:: paddle.fluid.layers.upsampling_bilinear2d - :noindex: - diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index df2bd2eace52e78805433bea320f5de95d45bfc7..6ad44bb6905b6e3f2b6e4aeb3701ced5d18e2005 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -47,28 +47,6 @@ DecayedAdagrad :members: :noindex: -Adadelta ------------------ - -.. autoclass:: paddle.fluid.optimizer.Adadelta - :members: - :noindex: - -RMSProp ------------------ - -.. autoclass:: paddle.fluid.optimizer.RMSProp - :members: - :noindex: - -ModelAverage ------------------ - -.. autoclass:: paddle.fluid.optimizer.ModelAverage - :members: - :noindex: - - SGDOptimizer ------------ @@ -111,25 +89,31 @@ DecayedAdagradOptimizer :members: :noindex: +RMSPropOptimizer +---------------- -AdadeltaOptimizer ------------------ - -.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer +.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer :members: :noindex: +Adadelta +-------- -RMSPropOptimizer ------------------ +.. autoclass:: paddle.fluid.optimizer.Adadelta + :members: + :noindex: -.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer +ModelAverage +------------ + +.. autoclass:: paddle.fluid.optimizer.ModelAverage :members: :noindex: - + Optimizer --------- .. autoclass:: paddle.fluid.optimizer.Optimizer :members: :noindex: + diff --git a/doc/fluid/api/profiler.rst b/doc/fluid/api/profiler.rst index 74d102dcb0db35766c34e3d14939a8aa5861686b..39fda65863471a78895503184848a754828b71a1 100644 --- a/doc/fluid/api/profiler.rst +++ b/doc/fluid/api/profiler.rst @@ -23,3 +23,15 @@ profiler .. autofunction:: paddle.fluid.profiler.profiler :noindex: +start_profiler +-------------- + +.. autofunction:: paddle.fluid.profiler.start_profiler + :noindex: + +stop_profiler +------------- + +.. autofunction:: paddle.fluid.profiler.stop_profiler + :noindex: + diff --git a/doc/fluid/design/concepts/var_desc.md b/doc/fluid/design/concepts/var_desc.md index 6750323c0167bf1efbde6ef4fd670e88a5aa502a..8db67f6703d142da71cf06bd4f7e2cb13556f9b0 100644 --- a/doc/fluid/design/concepts/var_desc.md +++ b/doc/fluid/design/concepts/var_desc.md @@ -35,7 +35,7 @@ The computation `Program` consists of nested `Blocks`. Each `Block` will consist ## Definition of VarType -A VarDesc should have a name, type and whether or not it is persistable. The are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. A `VarDesc` then looks as the following: +A VarDesc should have a name, type and whether or not it is persistable. There are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. 
A `VarDesc` then looks as the following: ```proto message VarDesc { diff --git a/doc/fluid/howto/cluster/fluid_recordio.md b/doc/fluid/howto/cluster/fluid_recordio.md new file mode 100644 index 0000000000000000000000000000000000000000..55ce63ec193948424cd0b87f13d56b9cf6154dfc --- /dev/null +++ b/doc/fluid/howto/cluster/fluid_recordio.md @@ -0,0 +1,127 @@ +# How to use RecordIO in Fluid + +If you want to use RecordIO as your training data format, you need to convert your training data +to RecordIO files and read them during training. PaddlePaddle Fluid provides some +interfaces to deal with the RecordIO files. + +## Generate RecordIO File + +Before you start training with RecordIO files, you need to convert your training data +to RecordIO format with `fluid.recordio_writer.convert_reader_to_recordio_file`, as the sample code +below shows: + +```python + reader = paddle.batch(mnist.train(), batch_size=1) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + fluid.recordio_writer.convert_reader_to_recordio_file('./mnist.recordio', reader, feeder) +``` + +The above code snippet generates a RecordIO file `./mnist.recordio` on your host. + +**NOTE**: we recommend setting `batch_size=1` when generating the RecordIO files so that users can +adjust it flexibly when reading them. + +## Use the RecordIO file in a Local Training Job + +PaddlePaddle Fluid provides an interface `fluid.layers.io.open_recordio_file` to load your RecordIO file, +and then you can use it as a layer in your network configuration, as the sample code below shows: + +```python + data_file = fluid.layers.io.open_recordio_file( + filename="./mnist.recordio", + shapes=[(-1, 784),(-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int32"]) + data_file = fluid.layers.io.batch(data_file, batch_size=4) + + img, label = fluid.layers.io.read_file(data_file) + hidden = fluid.layers.fc(input=img, size=100, act='tanh') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + + fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss) + + place = fluid.CPUPlace() + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + avg_loss_np = [] + + # train a pass + batch_id = 0 + while True: + tmp, = exe.run(fetch_list=[avg_loss]) + + avg_loss_np.append(tmp) + print(batch_id) + batch_id += 1 +``` + +## Use the RecordIO files in Distributed Training + +1. Generate multiple RecordIO files + +For a distributed training job, you may have multiple trainer nodes +and one or more RecordIO files per trainer node. You can use the interface +`fluid.recordio_writer.convert_reader_to_recordio_files` to convert your training data +into multiple RecordIO files, as the sample code below shows: + +```python + reader = paddle.batch(mnist.train(), batch_size=1) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + fluid.recordio_writer.convert_reader_to_recordio_files( + reader, feeder, filename_suffix='./mnist.recordio', batch_per_file=100) +``` + +The above code generates multiple RecordIO files on your host, like: + +```bash +.
+ \_mnist-00000.recordio + |-mnist-00001.recordio + |-mnist-00002.recordio + |-mnist-00003.recordio + |-mnist-00004.recordio +``` + +2. Open multiple RecordIO files with `fluid.layers.io.open_files` + +For a distributed training job, the distributed system will schedule trainer processes on multiple nodes, +and each trainer process reads a part of the whole training data. We usually take the following approach to make the training +data allocated to each trainer process as uniform as possible: + +```python +def gen_train_list(file_pattern, trainers, trainer_id): + file_list = glob.glob(file_pattern) + ret_list = [] + for idx, f in enumerate(file_list): + if (idx + trainers) % trainers == trainer_id: + ret_list.append(f) + return ret_list + +trainers = int(os.getenv("TRAINERS")) +trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) +data_file = fluid.layers.io.open_files( + filenames=gen_train_list("./mnist-[0-9]*.recordio", trainers, trainer_id), + thread_num=1, + shapes=[(-1, 784),(-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int32"]) +img, label = fluid.layers.io.read_file(data_file) +... +``` diff --git a/doc/fluid/howto/optimization/benchmark/README.md b/doc/fluid/howto/optimization/benchmark/README.md deleted file mode 120000 index db30af7f53231c687f9ad61ad961a685733cbad0..0000000000000000000000000000000000000000 --- a/doc/fluid/howto/optimization/benchmark/README.md +++ /dev/null @@ -1 +0,0 @@ -../../../../../benchmark/cluster/README.md \ No newline at end of file diff --git a/doc/fluid/howto/optimization/benchmark/vgg16/README.md b/doc/fluid/howto/optimization/benchmark/vgg16/README.md deleted file mode 120000 index ca963ef5f06aa0c2fe507ba7548dca8017358120..0000000000000000000000000000000000000000 --- a/doc/fluid/howto/optimization/benchmark/vgg16/README.md +++ /dev/null @@ -1 +0,0 @@ -../../../../../../benchmark/cluster/vgg16/README.md \ No newline at end of file diff --git a/doc/fluid/howto/optimization/host_memory_profiling_cn.md b/doc/fluid/howto/optimization/host_memory_profiling_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..9b55a66ded8b48f7105c05f1462839a72ab5f904 --- /dev/null +++ b/doc/fluid/howto/optimization/host_memory_profiling_cn.md @@ -0,0 +1,89 @@ +## 堆内存分析和优化 + +计算机程序都可能有内存泄漏的风险。**内存泄漏**一般是由于程序在堆(heap)上分配了内存而没有释放,随着程序的运行占用的内存越来越大,一方面会影响程序的稳定性,可能让运行速度越来越慢,或者造成oom,甚至会影响运行程序的机器的稳定性,造成宕机。 + + +目前有很多内存泄漏分析工具,比较经典的有[valgrind](http://valgrind.org/docs/manual/quick-start.html#quick-start.intro), [gperftools](https://gperftools.github.io/gperftools/)。 + +因为Fluid是用Python驱动C++ core来运行,valgrind直接分析非常困难,需要自己编译debug版本的、带valgrind支持的专用Python版本,而且输出的信息中大部分是Python自己的符号和调用信息,分析起来很困难,另外使用valgrind会让程序运行速度变得非常慢,所以不建议使用。 + +本教程主要介绍[gperftools](https://gperftools.github.io/gperftools/)的使用。 + +gperftool主要支持以下四个功能: + +- thread-caching malloc +- heap-checking using tcmalloc +- heap-profiling using tcmalloc +- CPU profiler + +Paddle也提供了基于gperftool的[CPU性能分析教程](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/optimization/cpu_profiling_cn.md)。 + +对于堆内存的分析,主要用到thread-caching malloc和heap-profiling using tcmalloc。 + +## 使用流程 +#### 环境 +本教程基于paddle提供的Docker开发环境paddlepaddle/paddle:latest-dev,基于Ubuntu 16.04.4 LTS环境。 + +#### 使用流程 + +- 安装google-perftools + +``` +apt-get install libunwind-dev +apt-get install google-perftools +``` + +- 安装pprof + +``` +go get -u github.com/google/pprof +``` + +- 设置运行环境 + +``` +export PPROF_PATH=/root/gopath/bin/pprof +export PPROF_BINARY_PATH=/root/gopath/bin/pprof +export LD_PRELOAD=/usr/lib/libtcmalloc.so.4 +``` + +- 使用heap
profile来运行python程序。本质上是周期性的对堆的分配情况做一次快照。 + +``` +# HEAPPROFILE 设置生成的堆分析文件的目录和文件前缀 +# HEAP_PROFILE_ALLOCATION_INTERVAL 设置每分配多少存储dump一次dump,默认1GB +env HEAPPROFILE="./perf_log/test.log" HEAP_PROFILE_ALLOCATION_INTERVAL=209715200 python trainer.py +``` + +随着程序的运行,会在perf_log这个文件夹下生成很多文件,如下: + +``` +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0001.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0002.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0003.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0004.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0005.heap +-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0006.heap +``` + +- 使用pprof对heap文件进行分析。分析有两种模式: + - 完整模式。会对当前heap做一个分析,显示目前分配内存一些调用路径。 + + ``` + pprof --pdf python test.log.0012.heap + ``` + 上述命令会生成一个profile00x.pdf的文件,可以直接打开,例如:[memory_cpu_allocator](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_cpu_allocator.pdf)。从下图可以看出,在CPU版本fluid的运行过程中,分配存储最多的模块式CPUAllocator. 而别的模块相对而言分配内存较少,所以被忽略了,这对于分配内存泄漏是很不方便的,因为泄漏是一个缓慢的过程,在这种图中是无法看到的。 + + ![result](https://user-images.githubusercontent.com/3048612/40964027-a54033e4-68dc-11e8-836a-144910c4bb8c.png) + + - Diff模式。可以对两个时刻的heap做diff,把一些内存分配没有发生变化的模块去掉,而把增量部分显示出来。 + ``` + pprof --pdf --base test.log.0010.heap python test.log.1045.heap + ``` + 生成的结果为:[`memory_leak_protobuf`](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_leak_protobuf.pdf) + + 从图中可以看出:ProgramDesc这个结构,在两个版本之间增长了200MB+,所以这里有很大的内存泄漏的可能性,最终结果也确实证明是这里造成了泄漏。 + + ![result](https://user-images.githubusercontent.com/3048612/40964057-b434d5e4-68dc-11e8-894b-8ab62bcf26c2.png) + ![result](https://user-images.githubusercontent.com/3048612/40964063-b7dbee44-68dc-11e8-9719-da279f86477f.png) + diff --git a/doc/mobile/cross_compiling_for_android_cn.md b/doc/mobile/cross_compiling_for_android_cn.md index cdd6917239371a660d0df05bb623f0b94f8f11a3..0607748b751e9f2d606236d9e98868335379b05c 100644 --- a/doc/mobile/cross_compiling_for_android_cn.md +++ b/doc/mobile/cross_compiling_for_android_cn.md @@ -63,16 +63,16 @@ Android的Docker开发镜像向用户提供两个可配置的参数: - 编译`armeabi-v7a`,`Android API 21`的PaddlePaddle库 ```bash -$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev +$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev ./paddle/scripts/paddle_build.sh build_android ``` - 编译`arm64-v8a`,`Android API 21`的PaddlePaddle库 ```bash -$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev +$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev ./paddle/scripts/paddle_build.sh build_android ``` -执行上述`docker run`命令时,容器默认执行[paddle/scripts/docker/build_android.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文[配置交叉编译参数](#配置交叉编译参数)章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 +执行上述`docker run`命令时,容器执行[paddle/scripts/paddle_build.sh 
build_android](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文[配置交叉编译参数](#配置交叉编译参数)章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 ## 基于Linux交叉编译环境的编译方式 本文档将以Linux x86-64平台为例,介绍交叉编译Android平台上适用的PaddlePaddle库的方法和步骤。 diff --git a/doc/mobile/cross_compiling_for_android_en.md b/doc/mobile/cross_compiling_for_android_en.md index 6af16fc114a2310e364023ec43cc3c64149af8f7..572063e8012efee2d2e142eb57e459e0e8c6382c 100644 --- a/doc/mobile/cross_compiling_for_android_en.md +++ b/doc/mobile/cross_compiling_for_android_en.md @@ -36,7 +36,7 @@ $ docker pull docker.paddlepaddlehub.com/paddle:latest-dev-android We can run the Docker image we just created to build the inference library of PaddlePaddle for Android using the command below: ```bash -$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" paddle:dev-android +$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" paddle:dev-android ./paddle/scripts/paddle_build.sh build_android ``` The Docker image accepts two arguments `ANDROID_ABI` and `ANDROID_API`: @@ -70,7 +70,7 @@ The Docker image accepts two arguments `ANDROID_ABI` and `ANDROID_API`: The ARM-64 architecture (`arm64-v8a`) requires at least level 21 of Android API. -The default entry-point of the Docker image, [`paddle/scripts/docker/build_android.sh`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh) generates the [Android cross-compiling standalone toolchain](https://developer.android.com/ndk/guides/standalone_toolchain.html) based on the argument: `ANDROID_ABI` or `ANDROID_API`. For information about other configuration arguments, please continue reading. +The build command, [`paddle/scripts/paddle_build.sh build_android`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh) generates the [Android cross-compiling standalone toolchain](https://developer.android.com/ndk/guides/standalone_toolchain.html) based on the argument: `ANDROID_ABI` or `ANDROID_API`. For information about other configuration arguments, please continue reading. The above command generates and outputs the inference library in `$PWD/install_android` and puts third-party libraries in `$PWD/install_android/third_party`. diff --git a/doc/v2/build_and_install/build_from_source_cn.rst b/doc/v2/build_and_install/build_from_source_cn.rst index 741c01ce5428c0046daa5a784da70d4bb492438c..de7e9eb75c3a053179f2d03ac887955bb4e0a6d2 100644 --- a/doc/v2/build_and_install/build_from_source_cn.rst +++ b/doc/v2/build_and_install/build_from_source_cn.rst @@ -23,7 +23,7 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 在 `这里 `__ 找到 paddle_manylinux_devel 镜像的编译以及使用方法。或者参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。 -如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。 +如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 :ref:`编译依赖 <_compile_deps>` 之后才能开始编译的步骤。 编译PaddlePaddle,需要执行: @@ -106,7 +106,7 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 - 学习 Docker 有多难? 
- 理解 Docker 并不难,大概花十分钟看一下[这篇文章](https://zhuanlan.zhihu.com/p/19902938)。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 + 理解 Docker 并不难,大概花十分钟看一下 `这篇文章 `_ 。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 - 我可以用 IDE 吗? @@ -123,7 +123,7 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 - 可以并行编译吗? - 是的。我们的 Docker image 运行一个 [Bash 脚本](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh)。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 + 是的。我们的 Docker image 运行一个 `Bash脚本 `_ 。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 - Docker 需要 sudo @@ -131,11 +131,11 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 - 在 Windows/MacOS 上编译很慢 - Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考[这个issue](https://github.com/PaddlePaddle/Paddle/issues/627)。 + Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考 `这个issue `_ 。 - 磁盘不够 - 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考[这篇文章](https://zaiste.net/posts/removing_docker_containers/)来清理这些内容。 + 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考 `这篇文章 `_ 来清理这些内容。 .. _compile_deps: @@ -211,7 +211,7 @@ PaddlePaddle可以使用cuDNN v5.1之后的任何一个版本来编译运行, 编译选项的设置 ++++++++++++++ -PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( :code:`/usr/lib:/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 +PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( :code:`/usr/lib:/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 .. code-block:: bash diff --git a/doc/v2/build_and_install/build_from_source_en.rst b/doc/v2/build_and_install/build_from_source_en.rst index b06c43e19dcfc52ad0f074a85517a16744895a3a..b08b45d43ec7f1deb2889832079a731ee724a44c 100644 --- a/doc/v2/build_and_install/build_from_source_en.rst +++ b/doc/v2/build_and_install/build_from_source_en.rst @@ -11,7 +11,7 @@ To build PaddlePaddle, you need 1. A computer -- Linux, Windows, MacOS. 2. Docker. -Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. +Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. We run all the tools by running this image. .. _build_step: @@ -26,6 +26,8 @@ you can also find how to build and use paddle_manylinux_devel Docker image from `here `__ Or you can build your own image from source as the optional step below: +If you don't wish to use docker,you need to install several compile dependencies manually as :ref:`Compile Dependencies <_compile_deps>` shows to start compilation. + .. code-block:: bash # 1. clone the source code @@ -108,7 +110,7 @@ Frequently Asked Questions - How difficult is it to learn Docker? - It takes you ten minutes to read [an introductory article](https://docs.docker.com/get-started) and saves you more than one hour to install all required build tools, configure them, especially when new versions of PaddlePaddle require some new tools. Not even to mention the time saved when other people trying to reproduce the issue you have. 
+ It takes you ten minutes to read `an introductory article `_ and saves you more than one hour to install all required build tools, configure them, especially when new versions of PaddlePaddle require some new tools. Not even to mention the time saved when other people trying to reproduce the issue you have. - Can I use my favorite IDE? @@ -125,7 +127,7 @@ Frequently Asked Questions - Does Docker do parallel building? - Our building Docker image runs a [Bash script](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh), which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. + Our building Docker image runs a `Bash script `_ , which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. - Docker requires sudo @@ -133,11 +135,11 @@ Frequently Asked Questions - Docker on Windows/MacOS builds slowly - On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to [this issue](https://github.com/PaddlePaddle/Paddle/issues/627) for details. + On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to `this issue `_ for details. - Not enough disk space - Examples in this article use option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to [this article](https://zaiste.net/posts/removing_docker_containers/). + Examples in this article use option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to `this article `_ . .. _compile_deps: diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/contrib/inference/CMakeLists.txt index 6847f7db7fc0f6b41ced1260d409ca6eba9b53eb..1e3bb7bf16f969255dba6f6ec7a6a70bbb1e07ee 100644 --- a/paddle/contrib/inference/CMakeLists.txt +++ b/paddle/contrib/inference/CMakeLists.txt @@ -17,33 +17,77 @@ if(APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move") endif(APPLE) +set(ANAKIN_INCLUDE "" CACHE STRING "root of Anakin header files") +set(ANAKIN_LIBRARY "" CACHE STRING "path of Anakin library") + + +set(inference_deps paddle_inference_api paddle_fluid_api) + +# if anakin is set enable anakin api implementation +if(ANAKIN_INCLUDE_DIR AND ANAKIN_LIBRARY) + set(ANAKIN_FOUND ON) +else() + set(ANAKIN_FOUND OFF) +endif() + +if (ANAKIN_FOUND) + # Anakin's code style doesn't follow google c style. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=comment + -Wno-error=reorder + -Wno-error=format + -Wno-error=switch + -Wno-error=return-type + -Wno-error=non-virtual-dtor + -Wno-error=cpp") + + message(STATUS "Anakin for inference is enabled") + message(STATUS "Anakin is set INCLUDE:${ANAKIN_INCLUDE} LIBRARY:${ANAKIN_LIBRARY}") + include_directories("${ANAKIN_INCLUDE}") + # Anakin's source path is a mass, need to set sub-directories trivially. 
+ include_directories("${ANAKIN_INCLUDE}/saber") + link_directories("${ANAKIN_LIBRARY}") + + nv_library(inference_anakin_api SRCS paddle_inference_api_anakin_engine.cc) + target_link_libraries(inference_anakin_api anakin) + list(APPEND inference_deps inference_anakin_api) +endif() + + function(inference_api_test TARGET_NAME) - set(options "") - set(oneValueArgs "") - set(multiValueArgs ARGS) - cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) - cc_test(test_paddle_inference_${TARGET_NAME} - SRCS test_paddle_inference_${TARGET_NAME}.cc - DEPS paddle_fluid_api paddle_inference_api - ARGS --dirname=${PYTHON_TESTS_DIR}/book/) - if(inference_test_ARGS) - set_tests_properties(test_paddle_inference_${TARGET_NAME} - PROPERTIES DEPENDS "${inference_test_ARGS}") - endif() -endfunction(inference_api_test) + if (WITH_TESTING) + set(options "") + set(oneValueArgs "") + set(multiValueArgs ARGS) + cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) + cc_test(${TARGET_NAME} + SRCS ${TARGET_NAME}.cc + DEPS "${inference_deps}" + ARGS --dirname=${PYTHON_TESTS_DIR}/book/) + if(inference_test_ARGS) + set_tests_properties(${TARGET_NAME} + PROPERTIES DEPENDS "${inference_test_ARGS}") + endif() + endif(WITH_TESTING) +endfunction(inference_api_test) cc_library(paddle_inference_api SRCS paddle_inference_api.cc paddle_inference_api_impl.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) -if(WITH_TESTING) - cc_test(test_paddle_inference_api - SRCS test_paddle_inference_api.cc - DEPS paddle_inference_api) +cc_test(test_paddle_inference_api + SRCS test_paddle_inference_api.cc + DEPS paddle_inference_api) - inference_api_test(api_impl - ARGS test_word2vec test_image_classification) +inference_api_test(test_paddle_inference_api_impl + ARGS test_word2vec test_image_classification) + +if (ANAKIN_FOUND) + nv_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc + DEPS ${inference_deps} protobuf) +endif() + +if(WITH_TESTING) + add_subdirectory(demo) endif() diff --git a/paddle/contrib/inference/demo/CMakeLists.txt b/paddle/contrib/inference/demo/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..7b0fa77ad13c19f177e5b2446bcda6551471e45f --- /dev/null +++ b/paddle/contrib/inference/demo/CMakeLists.txt @@ -0,0 +1,16 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +inference_api_test(simple_on_word2vec ARGS test_word2vec) diff --git a/paddle/contrib/inference/demo/simple_on_word2vec.cc b/paddle/contrib/inference/demo/simple_on_word2vec.cc new file mode 100644 index 0000000000000000000000000000000000000000..9b4843f714f11484860056711fd223edc8a5d037 --- /dev/null +++ b/paddle/contrib/inference/demo/simple_on_word2vec.cc @@ -0,0 +1,74 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file contains a simple demo for how to take a model for inference. + */ + +#include +#include +#include +#include "paddle/contrib/inference/paddle_inference_api.h" + +namespace paddle { +namespace demo { + +DEFINE_string(dirname, "", "Directory of the inference model."); + +void Main(bool use_gpu) { + //# 1. Create PaddlePredictor with a config. + NativeConfig config; + config.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config.use_gpu = use_gpu; + config.fraction_of_gpu_memory = 0.15; + config.device = 0; + auto predictor = + CreatePaddlePredictor(config); + + for (int batch_id = 0; batch_id < 3; batch_id++) { + //# 2. Prepare input. + int64_t data[4] = {1, 2, 3, 4}; + + PaddleBuf buf{.data = data, .length = sizeof(data)}; + PaddleTensor tensor{.name = "", + .shape = std::vector({4, 1}), + .data = buf, + .dtype = PaddleDType::INT64}; + + // For simplicity, we set all the slots with the same data. + std::vector slots(4, tensor); + + //# 3. Run + std::vector outputs; + CHECK(predictor->Run(slots, &outputs)); + + //# 4. Get output. + ASSERT_EQ(outputs.size(), 1UL); + LOG(INFO) << "output buffer size: " << outputs.front().data.length; + const size_t num_elements = outputs.front().data.length / sizeof(float); + // The outputs' buffers are in CPU memory. + for (size_t i = 0; i < std::min(5UL, num_elements); i++) { + LOG(INFO) << static_cast(outputs.front().data.data)[i]; + } + } +} + +TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); } + +#ifdef PADDLE_WITH_CUDA +TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); } +#endif + +} // namespace demo +} // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api.h b/paddle/contrib/inference/paddle_inference_api.h index b4c7f9bef4d2e83038ff223614a89e1b0493fc6f..c4588cf04030b9627dbe9b40c1bb04d1e782ebba 100644 --- a/paddle/contrib/inference/paddle_inference_api.h +++ b/paddle/contrib/inference/paddle_inference_api.h @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ /* * This file contains the definition of a simple Inference API for Paddle. @@ -40,20 +40,30 @@ struct PaddleBuf { struct PaddleTensor { std::string name; // variable name. std::vector shape; + // TODO(Superjomn) for LoD support, add a vector> field if needed. PaddleBuf data; // blob of data. PaddleDType dtype; }; +enum class PaddleEngineKind { + kNative = 0, // Use the native Fluid facility. + kAnakin, // Use Anakin for inference. + // TODO(Superjomn) support following engines latter. + // kTensorRT, // Use TensorRT for inference. + // kAutoMixedAnakin, // Automatically mix Fluid with Anakin. + // kAutoMixedTensorRT, // Automatically mix Fluid with TensorRT. +}; + /* * A simple Inference API for Paddle. Currently this API can be used by * non-sequence scenerios. - * TODO(Superjomn) Support another API for NLP-related usages. */ class PaddlePredictor { public: struct Config; PaddlePredictor() = default; PaddlePredictor(const PaddlePredictor&) = delete; + PaddlePredictor& operator=(const PaddlePredictor&) = delete; // Predict an record. // The caller should be responsible for allocating and releasing the memory of @@ -67,16 +77,7 @@ class PaddlePredictor { virtual std::unique_ptr Clone() = 0; // Destroy the Predictor. - virtual ~PaddlePredictor() {} - - enum class EngineKind { - kNative = -1, // Use the native Fluid facility. - // TODO(Superjomn) support latter. - // kAnakin, // Use Anakin for inference. - // kTensorRT, // Use TensorRT for inference. - // kAutoMixedAnakin, // Automatically mix Fluid with Anakin. - // kAutoMixedTensorRT, // Automatically mix Fluid with TensorRT. - }; + virtual ~PaddlePredictor() = default; // The common configs for all the predictors. struct Config { @@ -86,18 +87,31 @@ class PaddlePredictor { }; struct NativeConfig : public PaddlePredictor::Config { + // GPU related fields. bool use_gpu{false}; - int device; - float fraction_of_gpu_memory; + int device{0}; + float fraction_of_gpu_memory{-1.f}; // Negative to notify initialization. + std::string prog_file; std::string param_file; - bool share_variables; }; -// A factory to help create difference predictor. -template < - typename ConfigT, - PaddlePredictor::EngineKind engine = PaddlePredictor::EngineKind::kNative> +// Configurations for Anakin engine. +struct AnakinConfig : public PaddlePredictor::Config { + int device; + std::string model_file; + int max_batch_size{-1}; +}; + +// A factory to help create different predictors. +// +// FOR EXTENSION DEVELOPER: +// Different predictors are designated by config type and engine kind. Similar +// configs can be merged, but there shouldn't be a huge config containing +// different fields for more than one kind of predictors. +// +// Similarly, each engine kind should map to a unique predictor implementation. +template std::unique_ptr CreatePaddlePredictor(const ConfigT& config); } // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine.cc b/paddle/contrib/inference/paddle_inference_api_anakin_engine.cc new file mode 100644 index 0000000000000000000000000000000000000000..865d7ac10db55ce9565f4b1a35defa2a3d1d40ef --- /dev/null +++ b/paddle/contrib/inference/paddle_inference_api_anakin_engine.cc @@ -0,0 +1,82 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "paddle/contrib/inference/paddle_inference_api_anakin_engine.h" + +namespace paddle { + +PaddleInferenceAnakinPredictor::PaddleInferenceAnakinPredictor( + const AnakinConfig &config) { + CHECK(Init(config)); +} + +bool PaddleInferenceAnakinPredictor::Init(const AnakinConfig &config) { + // TODO(Superjomn) Tell anakin to support return code. + engine_.Build(config.model_file, config.max_batch_size); + return true; +} + +bool PaddleInferenceAnakinPredictor::Run( + const std::vector &inputs, + std::vector *output_data) { + for (const auto &input : inputs) { + if (input.dtype != PaddleDType::FLOAT32) { + LOG(ERROR) << "Only support float type inputs. " << input.name + << "'s type is not float"; + return false; + } + engine_.SetInputFromCPU( + input.name, static_cast(input.data.data), input.data.length); + } + + // TODO(Superjomn) Tell anakin to support return code. + engine_.Execute(); + + if (output_data->empty()) { + LOG(ERROR) << "At least one output should be set with tensors' names."; + return false; + } + for (auto &output : *output_data) { + auto *tensor = engine_.GetOutputInGPU(output.name); + output.shape = tensor->shape(); + // Copy data from GPU -> CPU + if (cudaMemcpy(output.data.data, + tensor->data(), + tensor->size(), + cudaMemcpyDeviceToHost) != 0) { + LOG(ERROR) << "copy data from GPU to CPU error"; + return false; + } + } + return true; +} + +// TODO(Superjomn) To implement latter. +std::unique_ptr PaddleInferenceAnakinPredictor::Clone() { + return nullptr; +} + +// A factory to help create difference predictor. +template <> +std::unique_ptr +CreatePaddlePredictor( + const AnakinConfig &config) { + std::unique_ptr x( + new PaddleInferenceAnakinPredictor(config)); + return x; +}; + +} // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine.h b/paddle/contrib/inference/paddle_inference_api_anakin_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..fe9f562e9d1d40c30585bcb68fa51e445bedb4aa --- /dev/null +++ b/paddle/contrib/inference/paddle_inference_api_anakin_engine.h @@ -0,0 +1,51 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file contains the implementation of inference API with Anakin engine + * embeded, this API can only support Anakin models. + */ + +#pragma once + +// NOTE This header file do not have namespace. +// TODO(Superjomn) Tell Anakin to provide better APIs. 
+#include +#include "paddle/contrib/inference/paddle_inference_api.h" + +namespace paddle { + +class PaddleInferenceAnakinPredictor : public PaddlePredictor { + public: + PaddleInferenceAnakinPredictor(const AnakinConfig& config); + + // NOTE Unlike the native engine, the buffers of anakin engine's output_data + // should be allocated first. + // TODO(Superjomn) should unify all the behaviors of output_data accross all + // the engines. + bool Run(const std::vector& inputs, + std::vector* output_data) override; + + std::unique_ptr Clone() override; + + private: + bool Init(const AnakinConfig& config); + + anakin::AnakinEngine + engine_; +}; + +} // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc b/paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..43324bc67cba16c36d9dbcb58ccde1c57293085e --- /dev/null +++ b/paddle/contrib/inference/paddle_inference_api_anakin_engine_tester.cc @@ -0,0 +1,27 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/contrib/inference/paddle_inference_api.h" +#include + +namespace paddle { + +TEST(inference, anakin) { + AnakinConfig config; + + auto engine = + CreatePaddlePredictor(config); +} + +} // namespace paddle diff --git a/paddle/contrib/inference/paddle_inference_api_impl.cc b/paddle/contrib/inference/paddle_inference_api_impl.cc index 989252f69e42778dfd791cdee02c550f2aa78803..bda2981a14482e2c4a29773d37b074506cc344b1 100644 --- a/paddle/contrib/inference/paddle_inference_api_impl.cc +++ b/paddle/contrib/inference/paddle_inference_api_impl.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include @@ -54,18 +54,24 @@ std::string num2str(T a) { } } // namespace -bool NativePaddlePredictor::Init() { +bool NativePaddlePredictor::Init( + std::shared_ptr parent_scope) { VLOG(3) << "Predictor::init()"; - // TODO(panyx0718): Should CPU vs GPU device be decided by id? - if (config_.device >= 0) { + if (config_.use_gpu) { place_ = paddle::platform::CUDAPlace(config_.device); } else { place_ = paddle::platform::CPUPlace(); } - paddle::framework::InitDevices(false); + if (parent_scope) { + scope_ = parent_scope; + sub_scope_ = &(parent_scope->NewScope()); + } else { + paddle::framework::InitDevices(false); + scope_.reset(new paddle::framework::Scope()); + } + executor_.reset(new paddle::framework::Executor(place_)); - scope_.reset(new paddle::framework::Scope()); // Initialize the inference program if (!config_.model_dir.empty()) { @@ -84,18 +90,22 @@ bool NativePaddlePredictor::Init() { return false; } ctx_ = executor_->Prepare(*inference_program_, 0); + executor_->CreateVariables( + *inference_program_, sub_scope_ ? sub_scope_ : scope_.get(), 0); - // Create variables - // TODO(panyx0718): Why need to test share_variables here? - if (config_.share_variables) { - executor_->CreateVariables(*inference_program_, scope_.get(), 0); - } // Get the feed_target_names and fetch_target_names feed_target_names_ = inference_program_->GetFeedTargetNames(); fetch_target_names_ = inference_program_->GetFetchTargetNames(); return true; } +NativePaddlePredictor::~NativePaddlePredictor() { + if (sub_scope_) { + PADDLE_ENFORCE_NOT_NULL(scope_, "Should have parent scope!"); + scope_->DeleteScope(sub_scope_); + } +}; + bool NativePaddlePredictor::Run(const std::vector &inputs, std::vector *output_data) { VLOG(3) << "Predictor::predict"; @@ -120,11 +130,12 @@ bool NativePaddlePredictor::Run(const std::vector &inputs, } // Run the inference program // if share variables, we need not create variables - executor_->RunPreparedContext(ctx_.get(), - scope_.get(), - &feed_targets, - &fetch_targets, - !config_.share_variables); + executor_->RunPreparedContext( + ctx_.get(), + sub_scope_ != nullptr ? sub_scope_ : scope_.get(), + &feed_targets, + &fetch_targets, + false /* don't create variable eatch time */); if (!GetFetch(fetchs, output_data)) { LOG(ERROR) << "fail to get fetchs"; return false; @@ -137,7 +148,7 @@ std::unique_ptr NativePaddlePredictor::Clone() { VLOG(3) << "Predictor::clone"; std::unique_ptr cls(new NativePaddlePredictor(config_)); - if (!dynamic_cast(cls.get())->Init()) { + if (!dynamic_cast(cls.get())->Init(scope_)) { LOG(ERROR) << "fail to call Init"; return nullptr; } @@ -242,11 +253,16 @@ bool NativePaddlePredictor::GetFetch( template <> std::unique_ptr -CreatePaddlePredictor( +CreatePaddlePredictor( const NativeConfig &config) { VLOG(3) << "create NativePaddlePredictor"; if (config.use_gpu) { // 1. 
GPU memeroy + PADDLE_ENFORCE_GT( + config.fraction_of_gpu_memory, + 0.f, + "fraction_of_gpu_memory in the config should be set to range (0., 1.]"); + PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device); std::vector flags; if (config.fraction_of_gpu_memory >= 0.0f || config.fraction_of_gpu_memory <= 0.95f) { @@ -260,7 +276,7 @@ CreatePaddlePredictor( } std::unique_ptr predictor(new NativePaddlePredictor(config)); - if (!dynamic_cast(predictor.get())->Init()) { + if (!dynamic_cast(predictor.get())->Init(nullptr)) { return nullptr; } return std::move(predictor); diff --git a/paddle/contrib/inference/paddle_inference_api_impl.h b/paddle/contrib/inference/paddle_inference_api_impl.h index 84707e223d7aa3d1ebca933923e932b3973613ae..86d1db7bcc7567e104cd20c9f767ed4513f611f5 100644 --- a/paddle/contrib/inference/paddle_inference_api_impl.h +++ b/paddle/contrib/inference/paddle_inference_api_impl.h @@ -34,14 +34,15 @@ class NativePaddlePredictor : public PaddlePredictor { explicit NativePaddlePredictor(const NativeConfig &config) : config_(config) {} - bool Init(); + // will only create sub scope if have global scope + bool Init(std::shared_ptr parent_scope); bool Run(const std::vector &inputs, std::vector *output_data) override; std::unique_ptr Clone() override; - ~NativePaddlePredictor() override{}; + ~NativePaddlePredictor() override; private: bool SetFeed(const std::vector &input_datas, @@ -52,11 +53,13 @@ class NativePaddlePredictor : public PaddlePredictor { NativeConfig config_; platform::Place place_; std::unique_ptr executor_; - std::unique_ptr scope_; + std::shared_ptr scope_; std::unique_ptr ctx_; std::unique_ptr inference_program_; std::vector feed_target_names_; std::vector fetch_target_names_; + // Do not use unique_ptr, use parent scope to delete + framework::Scope *sub_scope_{nullptr}; }; } // namespace paddle diff --git a/paddle/contrib/inference/test_paddle_inference_api_impl.cc b/paddle/contrib/inference/test_paddle_inference_api_impl.cc index 5240fc2f20211ac5d38c57b71db31d04a6dc536a..1f960677163988be6f4c502738861bf86588f406 100644 --- a/paddle/contrib/inference/test_paddle_inference_api_impl.cc +++ b/paddle/contrib/inference/test_paddle_inference_api_impl.cc @@ -47,7 +47,6 @@ NativeConfig GetConfig() { config.fraction_of_gpu_memory = 0.15; config.use_gpu = true; config.device = 0; - config.share_variables = true; return config; } @@ -75,7 +74,7 @@ TEST(paddle_inference_api_impl, word2vec) { ASSERT_EQ(outputs.size(), 1UL); size_t len = outputs[0].data.length; float* data = static_cast(outputs[0].data.data); - for (int j = 0; j < len / sizeof(float); ++j) { + for (size_t j = 0; j < len / sizeof(float); ++j) { ASSERT_LT(data[j], 1.0); ASSERT_GT(data[j], -1.0); } @@ -93,7 +92,7 @@ TEST(paddle_inference_api_impl, word2vec) { TestInference(config.model_dir, cpu_feeds, cpu_fetchs1); float* lod_data = output1.data(); - for (size_t i = 0; i < output1.numel(); ++i) { + for (int i = 0; i < output1.numel(); ++i) { EXPECT_LT(lod_data[i] - data[i], 1e-3); EXPECT_GT(lod_data[i] - data[i], -1e-3); } diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index ed1e70c6460b513c1d2e1add18ac037f71d36944..dbd375aa31bfbdcb109b6302acf23b3bb3b6befe 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -87,7 +87,7 @@ cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method) -cc_library(parallel_executor SRCS parallel_executor.cc DEPS 
multi_devices_graph_builder threaded_ssa_graph_executor) +cc_library(parallel_executor SRCS parallel_executor.cc DEPS multi_devices_graph_builder threaded_ssa_graph_executor scope_buffered_ssa_graph_executor) cc_library(prune SRCS prune.cc DEPS framework_proto) cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index e7842e9b8130d35e511e02dfb1dc27f307d17f38..f537e4b9e569dd4c513ac0efde7240833bcf04b6 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -169,17 +169,13 @@ void BlockDesc::Flush() { } if (need_update_) { - auto &op_field = *this->desc_->mutable_ops(); - this->ClearPBOps(); - op_field.Reserve(static_cast(ops_.size())); + this->desc_->mutable_ops()->Clear(); for (auto &op_desc : ops_) { - op_field.AddAllocated(op_desc->Proto()); + this->desc_->mutable_ops()->Add()->CopyFrom(*op_desc->Proto()); } - auto &var_field = *this->desc_->mutable_vars(); - this->ClearPBVars(); - var_field.Reserve(static_cast(vars_.size())); + this->desc_->mutable_vars()->Clear(); for (auto &var_desc : vars_) { - var_field.AddAllocated(var_desc.second->Proto()); + this->desc_->mutable_vars()->Add()->CopyFrom(*var_desc.second->Proto()); } need_update_ = false; } @@ -217,22 +213,6 @@ BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, } } -void BlockDesc::ClearPBOps() { - auto ops = this->desc_->mutable_ops(); - while (!ops->empty()) { - // we do not own the OpDesc, so release the ownership. - ops->ReleaseLast(); - } -} - -void BlockDesc::ClearPBVars() { - auto vars = this->desc_->mutable_vars(); - while (!vars->empty()) { - // we do not own the VarDesc, so release the ownership. - vars->ReleaseLast(); - } -} - void BlockDesc::SetForwardBlockID(int32_t forward_block_id) { PADDLE_ENFORCE(!desc_->has_forward_block_idx(), "Parent block ID has been set to %d. 
Cannot set to %d", diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 189dd6c52f85b5bf623b98c64c07c0c7269505d4..ce48548418478cc5c9f9ca1244df9e66dca884e6 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -41,11 +41,6 @@ class BlockDesc { BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, ProgramDesc *prog); - ~BlockDesc() { - this->ClearPBVars(); - this->ClearPBOps(); - } - int32_t ID() const { return desc_->idx(); } int32_t Parent() const { return desc_->parent_idx(); } @@ -113,10 +108,6 @@ class BlockDesc { ProgramDesc *Program() const { return this->prog_; } - private: - void ClearPBOps(); - void ClearPBVars(); - private: ProgramDesc *prog_; // not_own proto::BlockDesc *desc_; // not_own diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 1bcd8412eb2d618b923bcd0557d118af62271f4a..c026e6c100a303b43650f08cd12d7260258c8f7e 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -36,5 +36,6 @@ cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_ha device_context broadcast_op_handle) cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory device_context gather_op_handle) +cc_library(scope_buffered_ssa_graph_executor SRCS scope_buffered_ssa_graph_executor.cc DEPS ssa_graph_executor) #cc_test(reduce_op_handle_test SRCS reduce_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory # device_context reduce_op_handle ) diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h index e8d510ec955602b5a3f73ca06caa121886eb150b..e7aa74742f827efabff1189d3213edd748d9082d 100644 --- a/paddle/fluid/framework/details/execution_strategy.h +++ b/paddle/fluid/framework/details/execution_strategy.h @@ -22,6 +22,7 @@ struct ExecutionStrategy { size_t num_threads_{0}; bool use_event_{true}; bool allow_op_delay_{false}; + size_t num_iteration_per_drop_scope_{100}; }; } // namespace details diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc new file mode 100644 index 0000000000000000000000000000000000000000..eb4e7ec52f907f9403e21ec2734d61824f51a58b --- /dev/null +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -0,0 +1,76 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" +#include +#include +#include "paddle/fluid/framework/executor.h" + +namespace paddle { +namespace framework { +namespace details { +ScopeBufferedSSAGraphExecutor::ScopeBufferedSSAGraphExecutor( + ExecutionStrategy strategy, std::vector local_scopes, + std::vector var_infos, std::vector places, + std::unique_ptr &&underlying_executor) + : strategy_(std::move(strategy)), + underlying_executor_(std::move(underlying_executor)), + local_scopes_(std::move(local_scopes)), + var_infos_(std::move(var_infos)), + places_(std::move(places)) {} + +FeedFetchList ScopeBufferedSSAGraphExecutor::Run( + const std::vector &fetch_tensors) { + if (drop_scope_counter_ == 0) { + // Create local scopes. + for (auto it = local_scopes_.rbegin(); it != local_scopes_.rend(); ++it) { + auto &scope = *it; + Scope &local_scope = scope->NewScope(); + *scope->Var(details::kLocalExecScopeName)->GetMutable() = + &local_scope; + + for (auto &info : var_infos_) { + if (scope->FindVar(info.name_) != nullptr) { + continue; + } + + if (info.persistable_) { // Persistable + InitializeVariable(scope->Var(info.name_), info.type_); + } else { + InitializeVariable(local_scope.Var(info.name_), info.type_); + } + } + } + } + + auto fetch_data = underlying_executor_->Run(fetch_tensors); + drop_scope_counter_ += 1; + if (!fetch_tensors.empty() || + drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) { + drop_scope_counter_ = 0; + // Wait All computational streams + for (auto p : places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); + } + for (auto &scope : local_scopes_) { + auto &local_scope = + *scope->Var(details::kLocalExecScopeName)->GetMutable(); + scope->DeleteScope(local_scope); + } + } + return fetch_data; +} +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..20df7a4722d589ffd168f842e927cff8411096bb --- /dev/null +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h @@ -0,0 +1,53 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
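
To make the intent of `ScopeBufferedSSAGraphExecutor` concrete: it wraps another `SSAGraphExecutor`, reuses the per-place local execution scopes across iterations, and only drops and recreates them every `num_iteration_per_drop_scope_` runs (or whenever fetch targets are requested). The fragment below shows how the two executors compose, mirroring the `ParallelExecutor` change later in this diff; it is not a standalone program, and `graph`, `local_scopes`, `var_infos`, `places`, and `fetch_tensor_names` are assumed to be built the way `ParallelExecutor` builds them.

```cpp
using namespace paddle::framework;

details::ExecutionStrategy strategy;
strategy.num_threads_ = 4;                     // illustrative value
strategy.num_iteration_per_drop_scope_ = 100;  // reuse local scopes for 100 runs

// The threaded executor owns the SSA graph and runs the ops.
std::unique_ptr<details::SSAGraphExecutor> threaded_exec(
    new details::ThreadedSSAGraphExecutor(strategy, local_scopes, places,
                                          std::move(graph)));

// The scope-buffered executor manages local scope creation and deletion
// around the underlying executor.
std::unique_ptr<details::SSAGraphExecutor> exec(
    new details::ScopeBufferedSSAGraphExecutor(
        strategy, local_scopes, std::move(var_infos), places,
        std::move(threaded_exec)));

FeedFetchList fetched = exec->Run(fetch_tensor_names);
```
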
+ +#pragma once + +#include +#include +#include +#include "paddle/fluid/framework/details/execution_strategy.h" +#include "paddle/fluid/framework/details/ssa_graph_executor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/place.h" +namespace paddle { +namespace framework { +namespace details { + +struct VariableInfo { + std::string name_; + proto::VarType::Type type_; + bool persistable_; +}; + +class ScopeBufferedSSAGraphExecutor : public SSAGraphExecutor { + public: + ScopeBufferedSSAGraphExecutor( + ExecutionStrategy strategy, std::vector local_scopes, + std::vector var_infos, std::vector places, + std::unique_ptr&& underlying_executor); + FeedFetchList Run(const std::vector& fetch_tensors) override; + + private: + size_t drop_scope_counter_{0}; + + ExecutionStrategy strategy_; + std::unique_ptr underlying_executor_; + std::vector local_scopes_; + std::vector var_infos_; + std::vector places_; +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc index 8da6ca889b89999e0f6f974503cea476c9de97f3..09b97bd0d98dc4ad1124dcbc495cff921bf03efc 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/ssa_graph_executor.cc @@ -17,10 +17,6 @@ namespace paddle { namespace framework { namespace details { - -SSAGraphExecutor::SSAGraphExecutor(std::unique_ptr &&graph) - : graph_(std::move(graph)) {} - SSAGraphExecutor::~SSAGraphExecutor() {} } // namespace details diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h index a8833b7388ab907020a260d356f1484ffd227658..958086033607a4ed8fb840f5b14fe5779625bd82 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.h +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -28,15 +28,11 @@ class SSAGraphExecutor { DISABLE_COPY_AND_ASSIGN(SSAGraphExecutor); public: - // Steal graph inside - explicit SSAGraphExecutor(std::unique_ptr &&graph); + SSAGraphExecutor() {} virtual ~SSAGraphExecutor(); virtual FeedFetchList Run(const std::vector &fetch_tensors) = 0; - - protected: - std::unique_ptr graph_; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 815f739371e77d953a28be99b38ec1b8ff26506c..496fadd04dac982b87b9d9e14f599ed37d9709d0 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -21,7 +21,7 @@ ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, std::unique_ptr &&graph) - : SSAGraphExecutor(std::move(graph)), + : graph_(std::move(graph)), pool_(strategy.num_threads_ >= 2 ? 
new ::ThreadPool(strategy.num_threads_) : nullptr), local_scopes_(local_scopes), @@ -189,7 +189,9 @@ void ThreadedSSAGraphExecutor::RunOp( BlockingQueue *ready_var_q, details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { try { - VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); + if (VLOG_IS_ON(10)) { + VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); + } op->Run(strategy_.use_event_); VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index 1f7f88d75218e757e4555ad093f3cd6558f624dd..4a2075f1cccb3211316567197da56c01d26f35ce 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -51,6 +51,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { details::OpHandleBase *op); private: + std::unique_ptr graph_; std::unique_ptr<::ThreadPool> pool_; std::vector local_scopes_; std::vector places_; diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 863053c32b190f4e8497b16f3edd76cb2f76168b..3d68c5fb870d5b575f97eeb286528544402b8ed9 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -220,8 +220,10 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, has_fetch_operators(program.Block(0), *fetch_targets, fetch_holder_name); ProgramDesc* copy_program = const_cast(&program); + std::unique_ptr unique_ptr_of_copy_program; if (!has_feed_ops || !has_fetch_ops) { - copy_program = std::unique_ptr(new ProgramDesc(program)).get(); + unique_ptr_of_copy_program.reset(new ProgramDesc(program)); + copy_program = unique_ptr_of_copy_program.get(); } auto* global_block = copy_program->MutableBlock(0); diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 50c3468d556bfe05d6c41906cf35cb671f711b1e..003304b85af00165d54efbb199be01b2c5106768 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -23,6 +23,7 @@ limitations under the License. */ #endif #include "paddle/fluid/framework/details/multi_devices_graph_builder.h" +#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/platform/profiler.h" @@ -42,8 +43,6 @@ class ParallelExecutorPrivate { #ifdef PADDLE_WITH_CUDA std::unique_ptr nccl_ctxs_; #endif - - std::vector> var_types_; bool own_local_scope; }; @@ -92,9 +91,18 @@ ParallelExecutor::ParallelExecutor( local_scopes.empty()) { // Is CUDA BCastParamsToGPUs(bcast_vars); } -// Startup Program has been run. All local scopes has correct parameters. + // Startup Program has been run. All local scopes has correct parameters. + + // Step 2. Create vars in each scope; + std::vector var_infos; + for (auto *var : main_program.Block(0).AllVars()) { + var_infos.emplace_back(); + var_infos.back().name_ = var->Name(); + var_infos.back().type_ = var->GetType(); + var_infos.back().persistable_ = var->Persistable(); + } -// Step 2. Convert main_program to SSA form and dependency graph. Also, insert +// Step 3. Convert main_program to SSA form and dependency graph. 
Also, insert // ncclOp #ifdef PADDLE_WITH_CUDA details::MultiDevSSAGraphBuilder builder( @@ -105,16 +113,15 @@ ParallelExecutor::ParallelExecutor( params, member_->local_scopes_, build_strategy); #endif + auto graph = builder.Build(main_program); member_->executor_.reset(new details::ThreadedSSAGraphExecutor( exec_strategy, member_->local_scopes_, places, std::move(graph))); - // Step 3. Create vars in each scope; - for (auto *var : main_program.Block(0).AllVars()) { - member_->var_types_.emplace_back(var->Name(), var->GetType(), - var->Persistable()); - } + member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor( + exec_strategy, member_->local_scopes_, std::move(var_infos), + member_->places_, std::move(member_->executor_))); } void ParallelExecutor::BCastParamsToGPUs( @@ -169,42 +176,9 @@ void ParallelExecutor::BCastParamsToGPUs( void ParallelExecutor::Run(const std::vector &fetch_tensors, const std::string &fetched_var_name) { platform::RecordBlock b(0); - // Create local scopes. - for (auto it = member_->local_scopes_.rbegin(); - it != member_->local_scopes_.rend(); ++it) { - auto &scope = *it; - Scope &local_scope = scope->NewScope(); - *scope->Var(details::kLocalExecScopeName)->GetMutable() = - &local_scope; - - for (auto &name_type_pair : member_->var_types_) { - if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) { - continue; - } - - if (std::get<2>(name_type_pair)) { // Persistable - InitializeVariable(scope->Var(std::get<0>(name_type_pair)), - std::get<1>(name_type_pair)); - } else { - InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)), - std::get<1>(name_type_pair)); - } - } - } - auto fetch_data = member_->executor_->Run(fetch_tensors); *member_->global_scope_->Var(fetched_var_name)->GetMutable() = fetch_data; - - // Wait All computational streams - for (auto p : member_->places_) { - platform::DeviceContextPool::Instance().Get(p)->Wait(); - } - for (auto &scope : member_->local_scopes_) { - auto &local_scope = - *scope->Var(details::kLocalExecScopeName)->GetMutable(); - scope->DeleteScope(local_scope); - } } void ParallelExecutor::FeedTensorsIntoLocalScopes( diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt index 9faf5bb3036775a2ba0c08d3d6ea17ffa73753c6..50835784440bfa177e38f9760bb4a47ad335a9e1 100644 --- a/paddle/fluid/inference/analysis/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -15,3 +15,9 @@ cc_test(test_subgraph_splitter DEPS analysis paddle_fluid tensor ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model) set_tests_properties(test_subgraph_splitter PROPERTIES DEPENDS test_word2vec) + +cc_test(test_dfg_graphviz_draw_pass + SRCS dfg_graphviz_draw_pass_tester.cc + DEPS analysis + ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model) +set_tests_properties(test_dfg_graphviz_draw_pass PROPERTIES DEPENDS test_word2vec) diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..41d4475382befa1bdaf7473520d64005a472a459 --- /dev/null +++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h @@ -0,0 +1,68 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file create an DFG_GraphvizDrawPass which helps to draw a data flow + * graph's structure using graphviz. + */ + +#pragma once + +#include +#include +#include "paddle/fluid/inference/analysis/pass.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * Output a dot file and write to some place. + */ +class DFG_GraphvizDrawPass : public DataFlowGraphPass { + public: + DFG_GraphvizDrawPass(const std::string& dir, const std::string& id) + : dir_(dir), id_(id) {} + + bool Initialize() override { return Pass::Initialize(); } + void Run(DataFlowGraph* graph) override { + auto content = Draw(graph); + std::ofstream file(GenDotPath()); + file.write(content.c_str(), content.size()); + file.close(); + LOG(INFO) << "draw dot to " << GenDotPath(); + } + + bool Finalize() override { return Pass::Finalize(); } + + Pass* CreatePrinterPass(std::ostream& os, + const std::string& banner) const override { + return nullptr; + } + + private: + // Path of the dot file to output. + std::string GenDotPath() const { + return dir_ + "/" + "graph_" + id_ + ".dot"; + } + + std::string Draw(DataFlowGraph* graph) { return graph->DotString(); } + + std::string dir_; + std::string id_; +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..3fc1cc18b855440c54c1ed6a9ab49a104c8c21f0 --- /dev/null +++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc @@ -0,0 +1,46 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" + +#include +#include +#include +#include "paddle/fluid/inference/analysis/ut_helper.h" + +namespace paddle { +namespace inference { +namespace analysis { + +TEST_F(DFG_Tester, dfg_graphviz_draw_pass_tester) { + auto dfg = ProgramDescToDFG(desc); + DFG_GraphvizDrawPass pass("./", "test"); + pass.Initialize(); + pass.Run(&dfg); + + // test content + std::ifstream file("./graph_test.dot"); + ASSERT_TRUE(file.is_open()); + + std::string line; + int no{0}; + while (std::getline(file, line)) { + no++; + } + ASSERT_EQ(no, 82); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt index 5ada1d631269209e912e2d4817382ea2c6c67353..23ca8bfac84f35ebdca2e2a1a8538d366358ca8b 100644 --- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt @@ -8,3 +8,5 @@ nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor) nv_test(test_trt_mul_op SRCS test_mul_op.cc mul_op.cc DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL) +nv_test(test_trt_fc_op SRCS test_fc_op.cc fc_op.cc + DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL) diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc index 209936c3bafb0d31546856dc36c1b48053a0634b..668d344f1bba1c012dcb42c71b996209b4703d78 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc @@ -21,7 +21,8 @@ namespace tensorrt { class Conv2dOpConverter : public OpConverter { public: Conv2dOpConverter() {} - void operator()(const framework::proto::OpDesc& op) override { + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope) override { LOG(INFO) << "convert a fluid conv2d op to tensorrt conv layer without bias"; } diff --git a/paddle/fluid/inference/tensorrt/convert/fc_op.cc b/paddle/fluid/inference/tensorrt/convert/fc_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..45b079559754a8f5c3fe39781b5700a75f425e99 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/fc_op.cc @@ -0,0 +1,119 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +// Reorder the elements from istrides to ostrides, borrowed from TRT convert in +// tensorflow. 
+// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorrt/convert/convert_nodes.cc#L318 +template +void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides, + T* odata, nvinfer1::DimsHW ostrides) { + for (int h = 0; h < shape.h(); ++h) { + for (int w = 0; w < shape.w(); ++w) { + odata[h * ostrides.h() + w * ostrides.w()] = + idata[h * ostrides.h() + w * ostrides.w()]; + } + } +} + +// Reorder the data layout from CK to KC. +void ReorderCKtoKC(TensorRTEngine::Weight& iweights, + TensorRTEngine::Weight* oweights) { + int c = iweights.dims[0]; + int k = iweights.dims[1]; + oweights->dims.assign({k, c}); + nvinfer1::DimsHW istrides = {1, k}; + nvinfer1::DimsHW ostrides = {c, 1}; + Reorder2({k, c}, static_cast(iweights.get().values), istrides, + static_cast(const_cast(oweights->get().values)), + ostrides); +} + +/* + * FC converter convert a MUL op in Fluid to a FC layer in TRT. + */ +class FcOpConverter : public OpConverter { + public: + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope) override { + VLOG(4) << "convert a fluid fc op to tensorrt fc layer without bias"; + + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + // Declare inputs + auto* X = engine_->GetITensor(op_desc.Input("X").front()); + + // Declare weights + auto* Y_v = scope.FindVar(op_desc.Input("Y").front()); + PADDLE_ENFORCE_NOT_NULL(Y_v); + auto* Y_t = Y_v->GetMutable(); + // This may trigger a GPU->CPU copy, because TRT's weight can only be + // assigned from CPU memory, that can't be avoided. + auto* weight_data = Y_t->mutable_data(platform::CPUPlace()); + PADDLE_ENFORCE_EQ(Y_t->dims().size(), 2UL); // a matrix + size_t n_output = Y_t->dims()[1]; + + framework::LoDTensor tmp; + tmp.Resize(Y_t->dims()); + memcpy(tmp.mutable_data(platform::CPUPlace()), Y_t->data(), + Y_t->dims()[0] * Y_t->dims()[1]); + + TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT, + static_cast(weight_data), + Y_t->memory_size() / sizeof(float)}; + TensorRTEngine::Weight tmp_weight(nvinfer1::DataType::kFLOAT, + static_cast(tmp.data()), + Y_t->memory_size() / sizeof(float)); + weight.dims.assign({Y_t->dims()[0], Y_t->dims()[1]}); + tmp_weight.dims = weight.dims; + + // The data layout of TRT FC layer's weight is different from fluid's FC, + // need to reorder the elements. + ReorderCKtoKC(tmp_weight, &weight); + + // Currently, the framework can only handle one fluid op -> one TRT layer, + // but fc fuses `mul` and `bias` (2 fluid ops), so here is a trick, just + // handle `mul`, leave `add` as another layer. 
+ // DEBUG + TensorRTEngine::Weight bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; + + auto* layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, + *const_cast(X), + n_output, weight.get(), bias.get()); + + auto output_name = op_desc.Output("Out").front(); + engine_->DeclareOutput(layer, 0, output_name); + } +}; + +REGISTER_TRT_OP_CONVERTER(fc, FcOpConverter); + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +USE_OP(mul); diff --git a/paddle/fluid/inference/tensorrt/convert/mul_op.cc b/paddle/fluid/inference/tensorrt/convert/mul_op.cc index aa8e66490f7e40038b0de4da32655f1b168ca332..6bb07709c7ee1c6b29c46425849a4f472d3df59d 100644 --- a/paddle/fluid/inference/tensorrt/convert/mul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/mul_op.cc @@ -24,8 +24,9 @@ namespace tensorrt { class MulOpConverter : public OpConverter { public: MulOpConverter() {} - void operator()(const framework::proto::OpDesc& op) override { - VLOG(4) << "convert a fluid mul op to tensorrt fc layer without bias"; + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope) override { + VLOG(4) << "convert a fluid mul op to tensorrt mul layer without bias"; framework::OpDesc op_desc(op, nullptr); // Declare inputs diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index 1cd3ed9a00acead2599420f88499bd0d74c2974b..3beafeefd06f24ec50b0e61c1fabe13d7e53f242 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -31,27 +31,42 @@ namespace tensorrt { class OpConverter { public: OpConverter() {} - virtual void operator()(const framework::proto::OpDesc& op) {} - void Run(const framework::proto::OpDesc& op, TensorRTEngine* engine) { - std::string type = op.type(); - auto* it = Registry::Lookup(type); - PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", type); - it->SetEngine(engine); - (*it)(op); - } + // Converter logic for an op. + virtual void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope) {} + + // Convert a single fluid operaotr and add the corresponding layer to TRT. 
+ void ConvertOp(const framework::proto::OpDesc& op, + const std::unordered_set& parameters, + const framework::Scope& scope, TensorRTEngine* engine) { + framework::OpDesc op_desc(op, nullptr); + + OpConverter* it{nullptr}; - // convert fluid op to tensorrt layer - void ConvertOp(const framework::proto::OpDesc& op, TensorRTEngine* engine) { - OpConverter::Run(op, engine); + if (op_desc.Type() == "mul") { + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); + std::string Y = op_desc.Input("Y")[0]; + if (parameters.count(Y)) { + it = Registry::Lookup("fc"); + } + } + if (!it) { + it = Registry::Lookup(op_desc.Type()); + } + PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", + op_desc.Type()); + it->SetEngine(engine); + (*it)(op, scope); } // convert fluid block to tensorrt network void ConvertBlock(const framework::proto::BlockDesc& block, - TensorRTEngine* engine) { + const std::unordered_set& parameters, + const framework::Scope& scope, TensorRTEngine* engine) { for (int i = 0; i < block.ops_size(); i++) { const auto& op = block.ops(i); - OpConverter::Run(op, engine); + ConvertOp(op, parameters, scope, engine); } } diff --git a/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc b/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..a30253072ac581ceca85ca10151a176f87a7cb39 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc @@ -0,0 +1,46 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +TEST(fc_op, test) { + std::unordered_set parameters({"mul-Y"}); + framework::Scope scope; + TRTConvertValidation validator(20, parameters, scope, 1000); + + validator.DeclInputVar("mul-X", nvinfer1::Dims4(8, 3, 1, 1)); + validator.DeclParamVar("mul-Y", nvinfer1::Dims2(3, 2)); + validator.DeclOutputVar("mul-Out", nvinfer1::Dims2(8, 2)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("mul"); + desc.SetInput("X", {"mul-X"}); + desc.SetInput("Y", {"mul-Y"}); + desc.SetOutput("Out", {"mul-Out"}); + + validator.SetOp(*desc.Proto()); + + validator.Execute(10); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc index d8b61d5f08ffd071c112b4677fcb6f6f50784bbc..1ce1130e5d660d717a1262a1fbdb4b620462c0b3 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc @@ -21,7 +21,9 @@ namespace inference { namespace tensorrt { TEST(MulOpConverter, main) { - TRTConvertValidation validator(10, 1000); + framework::Scope scope; + std::unordered_set parameters; + TRTConvertValidation validator(10, parameters, scope, 1000); validator.DeclInputVar("mul-X", nvinfer1::Dims2(10, 6)); validator.DeclInputVar("mul-Y", nvinfer1::Dims2(6, 10)); validator.DeclOutputVar("mul-Out", nvinfer1::Dims2(10, 10)); diff --git a/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc b/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc index 9ae7de9cbfa656fbcbb48557bd4b548115897c6d..1d3f5eabb2f839b2acfa9da6527589df1ec3767f 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" + #include #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" namespace paddle { namespace inference { @@ -27,7 +28,9 @@ TEST(OpConverter, ConvertBlock) { conv2d_op->SetType("conv2d"); OpConverter converter; - converter.ConvertBlock(*block->Proto(), nullptr /*TensorRTEngine*/); + framework::Scope scope; + converter.ConvertBlock(*block->Proto(), {}, scope, + nullptr /*TensorRTEngine*/); } } // namespace tensorrt diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h index 684bbc208fc1cb02d2a36b4de720309ea6bed173..d7e05dd5b5b235b7b166b22c5b094dc364e28dfc 100644 --- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h +++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h @@ -61,7 +61,10 @@ class TRTConvertValidation { public: TRTConvertValidation() = delete; - explicit TRTConvertValidation(int batch_size, int workspace_size = 1024) { + TRTConvertValidation(int batch_size, + const std::unordered_set& parameters, + framework::Scope& scope, int workspace_size = 1 << 10) + : parameters_(parameters), scope_(scope) { // create engine. 
engine_.reset(new TensorRTEngine(10, 1 << 10, &stream_)); engine_->InitNetwork(); @@ -76,19 +79,22 @@ class TRTConvertValidation { engine_->DeclareInput(name, nvinfer1::DataType::kFLOAT, dims); } + // Declare a parameter varaible in the scope. + void DeclParamVar(const std::string& name, const nvinfer1::Dims& dims) { + DeclVar(name, dims); + } + void DeclOutputVar(const std::string& name, const nvinfer1::Dims& dims) { DeclVar(name, dims); } + // Declare a variable in a fluid Scope. void DeclVar(const std::string& name, const nvinfer1::Dims& dims) { platform::CPUPlace place; platform::CPUDeviceContext ctx(place); // Init Fluid tensor. - std::vector dim_vec(dims.nbDims); - for (int i = 0; i < dims.nbDims; i++) { - dim_vec[i] = dims.d[i]; - } + std::vector dim_vec(dims.d, dims.d + dims.nbDims); auto* x = scope_.Var(name); auto* x_tensor = x->GetMutable(); x_tensor->Resize(framework::make_ddim(dim_vec)); @@ -99,7 +105,7 @@ class TRTConvertValidation { op_ = framework::OpRegistry::CreateOp(desc); OpConverter op_converter; - op_converter.ConvertOp(desc, engine_.get()); + op_converter.ConvertOp(desc, parameters_, scope_, engine_.get()); engine_->FreezeNetwork(); @@ -108,11 +114,13 @@ class TRTConvertValidation { // Set Inputs. for (const auto& input : op_desc_->InputArgumentNames()) { + if (parameters_.count(input)) continue; auto* var = scope_.FindVar(input); PADDLE_ENFORCE(var); auto tensor = var->GetMutable(); + engine_->SetInputFromCPU( - input, static_cast(tensor->data()), + input, static_cast(tensor->data()), sizeof(float) * analysis::AccuDims(tensor->dims(), tensor->dims().size())); } @@ -120,18 +128,21 @@ class TRTConvertValidation { void Execute(int batch_size) { // Execute Fluid Op - // Execute TRT platform::CPUPlace place; platform::CPUDeviceContext ctx(place); - engine_->Execute(batch_size); - op_->Run(scope_, place); + // Execute TRT. 
+ engine_->Execute(batch_size); + cudaStreamSynchronize(*engine_->stream()); ASSERT_FALSE(op_desc_->OutputArgumentNames().empty()); + const size_t output_space_size = 200; for (const auto& output : op_desc_->OutputArgumentNames()) { std::vector fluid_out; - std::vector trt_out(200); - engine_->GetOutputInCPU(output, &trt_out[0], 200 * sizeof(float)); + std::vector trt_out(output_space_size); + engine_->GetOutputInCPU(output, &trt_out[0], + output_space_size * sizeof(float)); + cudaStreamSynchronize(*engine_->stream()); auto* var = scope_.FindVar(output); auto tensor = var->GetMutable(); @@ -139,7 +150,7 @@ class TRTConvertValidation { // Compare two output ASSERT_FALSE(fluid_out.empty()); for (size_t i = 0; i < fluid_out.size(); i++) { - EXPECT_LT(std::abs(fluid_out[i] - trt_out[i]), 0.001); + EXPECT_LT(std::abs(fluid_out[i] - trt_out[i]), 1e-6); } } } @@ -149,9 +160,10 @@ class TRTConvertValidation { private: std::unique_ptr engine_; cudaStream_t stream_; - framework::Scope scope_; std::unique_ptr op_; std::unique_ptr op_desc_; + const std::unordered_set& parameters_; + framework::Scope& scope_; }; } // namespace tensorrt diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index a88236ae98e1816fc43796ead596c432b798d7de..3d75fefc1a735168131a6c67ac073e80aba32945 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -106,6 +106,7 @@ void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer* layer, int offset, name); auto* output = layer->getOutput(offset); + SetITensor(name, output); PADDLE_ENFORCE(output != nullptr); output->setName(name.c_str()); infer_network_->markOutput(*output); diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index d9d3163b66d4c4c302d12edcc42f00e1cdfa5a30..fabcfd9e80cc0ef2637201a1499ebbe2d6adfd8c 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -37,13 +37,15 @@ class TensorRTEngine : public EngineBase { // Weight is model parameter. class Weight { public: - Weight(nvinfer1::DataType dtype, void* value, int num_elem) { + Weight(nvinfer1::DataType dtype, void* value, size_t num_elem) { w_.type = dtype; w_.values = value; w_.count = num_elem; } const nvinfer1::Weights& get() { return w_; } + std::vector dims; + private: nvinfer1::Weights w_; }; diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index dbb81462b8273bd701e9c9f530eaf69817abd6a1..2fa5a9540ba1311c7f87e6675a53044b23dd8276 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -38,3 +38,11 @@ inference_test(recommender_system) #inference_test(rnn_encoder_decoder) #inference_test(understand_sentiment ARGS conv) inference_test(word2vec) + +# This is an unly work around to make this test run +# TODO(TJ): clean me up +cc_test(test_inference_nlp + SRCS test_inference_nlp.cc + DEPS paddle_fluid + ARGS + --model_path=${PADDLE_BINARY_DIR}/python/paddle/fluid/tests/book/recognize_digits_mlp.inference.model) diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc new file mode 100644 index 0000000000000000000000000000000000000000..70aa42ac4111c0524a55e26aaefa864338c1d6c1 --- /dev/null +++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc @@ -0,0 +1,236 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include // NOLINT +#include "gflags/gflags.h" +#include "gtest/gtest.h" +#include "paddle/fluid/inference/tests/test_helper.h" +#ifdef PADDLE_WITH_MKLML +#include +#include +#endif + +DEFINE_string(model_path, "", "Directory of the inference model."); +DEFINE_string(data_file, "", "File of input index data."); +DEFINE_int32(repeat, 100, "Running the inference program repeat times"); +DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run inference"); +DEFINE_bool(prepare_vars, true, "Prepare variables before executor"); +DEFINE_int32(num_threads, 1, "Number of threads should be used"); + +inline double GetCurrentMs() { + struct timeval time; + gettimeofday(&time, NULL); + return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; +} + +// This function just give dummy data for recognize_digits model. +size_t DummyData(std::vector* out) { + paddle::framework::LoDTensor input; + SetupTensor(&input, {1, 1, 28, 28}, -1.f, 1.f); + out->emplace_back(input); + return 1; +} + +// Load the input word index data from file and save into LodTensor. +// Return the size of words. +size_t LoadData(std::vector* out, + const std::string& filename) { + if (filename.empty()) { + return DummyData(out); + } + + size_t sz = 0; + std::fstream fin(filename); + std::string line; + out->clear(); + while (getline(fin, line)) { + std::istringstream iss(line); + std::vector ids; + std::string field; + while (getline(iss, field, ' ')) { + ids.push_back(stoi(field)); + } + if (ids.size() >= 1024) { + // Synced with NLP guys, they will ignore input larger then 1024 + continue; + } + + paddle::framework::LoDTensor words; + paddle::framework::LoD lod{{0, ids.size()}}; + words.set_lod(lod); + int64_t* pdata = words.mutable_data( + {static_cast(ids.size()), 1}, paddle::platform::CPUPlace()); + memcpy(pdata, ids.data(), words.numel() * sizeof(int64_t)); + out->emplace_back(words); + sz += ids.size(); + } + return sz; +} + +// Split input data samples into small pieces jobs as balanced as possible, +// according to the number of threads. 
+void SplitData( + const std::vector& datasets, + std::vector>* jobs, + const int num_threads) { + size_t s = 0; + jobs->resize(num_threads); + while (s < datasets.size()) { + for (auto it = jobs->begin(); it != jobs->end(); it++) { + it->emplace_back(&datasets[s]); + s++; + if (s >= datasets.size()) { + break; + } + } + } +} + +void ThreadRunInfer( + const int tid, paddle::framework::Executor* executor, + paddle::framework::Scope* scope, + const std::unique_ptr& inference_program, + const std::vector>& jobs) { + auto copy_program = std::unique_ptr( + new paddle::framework::ProgramDesc(*inference_program)); + auto& sub_scope = scope->NewScope(); + + std::string feed_holder_name = "feed_" + paddle::string::to_string(tid); + std::string fetch_holder_name = "fetch_" + paddle::string::to_string(tid); + copy_program->SetFeedHolderName(feed_holder_name); + copy_program->SetFetchHolderName(fetch_holder_name); + + const std::vector& feed_target_names = + copy_program->GetFeedTargetNames(); + const std::vector& fetch_target_names = + copy_program->GetFetchTargetNames(); + + PADDLE_ENFORCE_EQ(fetch_target_names.size(), 1UL); + std::map fetch_targets; + paddle::framework::LoDTensor outtensor; + fetch_targets[fetch_target_names[0]] = &outtensor; + + std::map feed_targets; + PADDLE_ENFORCE_EQ(feed_target_names.size(), 1UL); + + auto& inputs = jobs[tid]; + auto start_ms = GetCurrentMs(); + for (size_t i = 0; i < inputs.size(); ++i) { + feed_targets[feed_target_names[0]] = inputs[i]; + executor->Run(*copy_program, &sub_scope, &feed_targets, &fetch_targets, + true /*create_local_scope*/, true /*create_vars*/, + feed_holder_name, fetch_holder_name); + } + auto stop_ms = GetCurrentMs(); + scope->DeleteScope(&sub_scope); + LOG(INFO) << "Tid: " << tid << ", process " << inputs.size() + << " samples, avg time per sample: " + << (stop_ms - start_ms) / inputs.size() << " ms"; +} + +TEST(inference, nlp) { + if (FLAGS_model_path.empty()) { + LOG(FATAL) << "Usage: ./example --model_path=path/to/your/model"; + } + if (FLAGS_data_file.empty()) { + LOG(WARNING) << "No data file provided, will use dummy data!" + << "Note: if you use nlp model, please provide data file."; + } + LOG(INFO) << "Model Path: " << FLAGS_model_path; + LOG(INFO) << "Data File: " << FLAGS_data_file; + + std::vector datasets; + size_t num_total_words = LoadData(&datasets, FLAGS_data_file); + LOG(INFO) << "Number of samples (seq_len<1024): " << datasets.size(); + LOG(INFO) << "Total number of words: " << num_total_words; + + const bool model_combined = false; + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // 1. Define place, executor, scope + auto place = paddle::platform::CPUPlace(); + auto executor = paddle::framework::Executor(place); + std::unique_ptr scope( + new paddle::framework::Scope()); + + // 2. 
Initialize the inference_program and load parameters + std::unique_ptr inference_program; + inference_program = + InitProgram(&executor, scope.get(), FLAGS_model_path, model_combined); + if (FLAGS_use_mkldnn) { + EnableMKLDNN(inference_program); + } + +#ifdef PADDLE_WITH_MKLML + // only use 1 thread number per std::thread + omp_set_dynamic(0); + omp_set_num_threads(1); + mkl_set_num_threads(1); +#endif + + double start_ms = 0, stop_ms = 0; + if (FLAGS_num_threads > 1) { + std::vector> jobs; + SplitData(datasets, &jobs, FLAGS_num_threads); + std::vector> threads; + start_ms = GetCurrentMs(); + for (int i = 0; i < FLAGS_num_threads; ++i) { + threads.emplace_back( + new std::thread(ThreadRunInfer, i, &executor, scope.get(), + std::ref(inference_program), std::ref(jobs))); + } + for (int i = 0; i < FLAGS_num_threads; ++i) { + threads[i]->join(); + } + stop_ms = GetCurrentMs(); + } else { + if (FLAGS_prepare_vars) { + executor.CreateVariables(*inference_program, scope.get(), 0); + } + // always prepare context + std::unique_ptr ctx; + ctx = executor.Prepare(*inference_program, 0); + + // preapre fetch + const std::vector& fetch_target_names = + inference_program->GetFetchTargetNames(); + PADDLE_ENFORCE_EQ(fetch_target_names.size(), 1UL); + std::map fetch_targets; + paddle::framework::LoDTensor outtensor; + fetch_targets[fetch_target_names[0]] = &outtensor; + + // prepare feed + const std::vector& feed_target_names = + inference_program->GetFeedTargetNames(); + PADDLE_ENFORCE_EQ(feed_target_names.size(), 1UL); + std::map feed_targets; + + // feed data and run + start_ms = GetCurrentMs(); + for (size_t i = 0; i < datasets.size(); ++i) { + feed_targets[feed_target_names[0]] = &(datasets[i]); + executor.RunPreparedContext(ctx.get(), scope.get(), &feed_targets, + &fetch_targets, !FLAGS_prepare_vars); + } + stop_ms = GetCurrentMs(); + LOG(INFO) << "Tid: 0, process " << datasets.size() + << " samples, avg time per sample: " + << (stop_ms - start_ms) / datasets.size() << " ms"; + } + LOG(INFO) << "Total inference time with " << FLAGS_num_threads + << " threads : " << (stop_ms - start_ms) / 1000.0 + << " sec, QPS: " << datasets.size() / ((stop_ms - start_ms) / 1000); +} diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc index b892ac77d9ed60210ddadaecb1a4f214e5a25180..46ed99bcf2234f7621d9f00eb48c846d8a355795 100644 --- a/paddle/fluid/operators/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/activation_mkldnn_op.cc @@ -222,35 +222,35 @@ struct MKLDNNActivationGradFunc : public BaseActivationFunctor { }; template -using ReluMkldnnFunctor = +using ReluMKLDNNFunctor = MKLDNNActivationFunc; template -using TanhMkldnnFunctor = +using TanhMKLDNNFunctor = MKLDNNActivationFunc; template -using SqrtMkldnnFunctor = +using SqrtMKLDNNFunctor = MKLDNNActivationFunc; template -using AbsMkldnnFunctor = +using AbsMKLDNNFunctor = MKLDNNActivationFunc; template -using ReluMkldnnGradFunctor = +using ReluMKLDNNGradFunctor = MKLDNNActivationGradFunc; template -using TanhMkldnnGradFunctor = +using TanhMKLDNNGradFunctor = MKLDNNActivationGradFunc; template -using SqrtMkldnnGradFunctor = +using SqrtMKLDNNGradFunctor = MKLDNNActivationGradFunc; template -using AbsMkldnnGradFunctor = +using AbsMKLDNNGradFunctor = MKLDNNActivationGradFunc; } // namespace operators } // namespace paddle @@ -265,9 +265,9 @@ namespace ops = paddle::operators; ops::MKLDNNActivationGradKernel>); #define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro) \ - __macro(relu, ReluMkldnnFunctor, 
ReluMkldnnGradFunctor); \ - __macro(tanh, TanhMkldnnFunctor, TanhMkldnnGradFunctor); \ - __macro(sqrt, SqrtMkldnnFunctor, SqrtMkldnnGradFunctor); \ - __macro(abs, AbsMkldnnFunctor, AbsMkldnnGradFunctor); + __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \ + __macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor); \ + __macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor); \ + __macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor); FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL); diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 7a7b8b76e43b1f91a3ba2767c217993cc39f26b6..1828be57b5a54005a0066b18ebebdb740726f67a 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" -DEFINE_bool(cudnn_algo_use_autotune, true, +DEFINE_bool(cudnn_deterministic, true, "Whether allow using an autotuning algorithm for convolution " "operator. The autotuning algorithm may be non-deterministic. If " "false, the algorithm is deterministic."); @@ -272,7 +272,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); if (input_grad) { - if (FLAGS_cudnn_algo_use_autotune) { + if (FLAGS_cudnn_deterministic) { PADDLE_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, @@ -297,7 +297,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { } if (filter_grad) { - if (FLAGS_cudnn_algo_use_autotune) { + if (FLAGS_cudnn_deterministic) { PADDLE_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( handle, cudnn_input_desc, cudnn_output_grad_desc, diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index da9ca1a0c1d55018141f0e4285fe35d7c437fd55..f4d83e86ecb01eed863a387d827023a5d808dad0 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -38,6 +38,25 @@ void RPCClient::Init() { if (rpc_client_.get() == nullptr) { rpc_client_.reset(new RPCClient()); } + rpc_client_->InitEventLoop(); +} + +void RPCClient::InitEventLoop() { + // start the client process thread + // TODO(wuyi): can make this in a threadpool + client_thread_.reset(new std::thread(std::bind(&RPCClient::Proceed, this))); +} + +RPCClient::~RPCClient() { + Wait(); + cq_.Shutdown(); + { + std::lock_guard guard(chan_mutex_); + for (auto& it : channels_) { + it.second.reset(); + } + } + client_thread_->join(); } bool RPCClient::AsyncSendVariable(const std::string& ep, @@ -204,70 +223,37 @@ void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { req_count_++; } -bool RPCClient::Wait() { - VLOG(3) << "RPCClient begin Wait()" - << " req_count_:" << req_count_; - if (req_count_ <= 0) { - return true; - } - const size_t kReqCnt = req_count_; - bool a[kReqCnt]; - std::vector> waits(req_count_); - std::mutex mu; - - for (int i = 0; i < req_count_; i++) { - waits[i] = framework::AsyncIO([i, &a, &mu, this] { - bool ret = Proceed(); - std::lock_guard l(mu); - a[i] = ret; - }); - } - - for (int i = 0; i < req_count_; i++) { - waits[i].wait(); - } - - int last_req_count = req_count_; - req_count_ = 0; - - for (int i = 0; i < last_req_count; i++) { - if (!a[i]) { - return false; - } - } - - return true; +void 
RPCClient::Wait() { + std::unique_lock lk(sync_mutex_); + sync_cond_.wait(lk, [this] { return req_count_ == 0; }); } -bool RPCClient::Proceed() { - void* tag = NULL; +void RPCClient::Proceed() { + void* tag = nullptr; bool ok = false; - // request counts. - if (!cq_.Next(&tag, &ok)) { - LOG(ERROR) << "Get meets CompletionQueue error"; - return false; - } - - GPR_ASSERT(ok); - PADDLE_ENFORCE(tag); - - // TODO(gongwb): add more retries. - BaseProcessor* c = static_cast(tag); - if (!c->status_.ok()) { - LOG(ERROR) << "proc param error:" << c->var_h_.String() - << " grpc error:" << c->status_.error_message(); + while (cq_.Next(&tag, &ok)) { + BaseProcessor* c = static_cast(tag); + GPR_ASSERT(ok); + PADDLE_ENFORCE(c); + if (c->status_.ok()) { + c->Process(); + } else { + LOG(ERROR) << "var: " << c->var_h_.String() + << " grpc error:" << c->status_.error_message(); + } delete c; - return false; + { + std::lock_guard lk(sync_mutex_); + req_count_--; + } + sync_cond_.notify_all(); } - - c->Process(); - delete c; - return true; } + std::shared_ptr RPCClient::GetChannel(const std::string& ep) { // TODO(Yancey1989): make grpc client completely thread-safe - std::unique_lock lock(mutex_); + std::lock_guard guard(chan_mutex_); auto it = channels_.find(ep); if (it != channels_.end()) { return it->second; diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index 449d5105afb8c02294a0ef57610e7de1b1631b35..bb3813efcf4f77a8ec3d2f4b39969faa6216e38f 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -16,15 +16,18 @@ limitations under the License. */ #include -#include // NOLINT +#include // NOLINT +#include // NOLINT #include #include #include #include #include // NOLINT #include +#include // NOLINT #include +#include "grpc++/channel.h" #include "grpc++/generic/generic_stub.h" #include "grpc++/grpc++.h" #include "grpc++/support/byte_buffer.h" @@ -164,6 +167,7 @@ class FetchBarrierProcessor : public BaseProcessor { class RPCClient { public: RPCClient() {} + ~RPCClient(); static RPCClient* GetInstance(); @@ -192,19 +196,28 @@ class RPCClient { void AsyncSendFetchBarrier(const std::string& ep, int64_t time_out = 600 * 1000); - bool Wait(); + void Wait(); + // InitEventLoop should only be called by Init() + void InitEventLoop(); private: - bool Proceed(); + void Proceed(); std::shared_ptr GetChannel(const std::string& ep); // Init is called by GetInstance. 
static void Init(); private: grpc::CompletionQueue cq_; - std::map> channels_; + std::unordered_map> channels_; + std::unique_ptr client_thread_; + + // mutex for Wait client sync + std::mutex sync_mutex_; + std::condition_variable sync_cond_; std::atomic req_count_{0}; - std::mutex mutex_; + + // mutex for GetChannel thread safety + std::mutex chan_mutex_; static std::unique_ptr rpc_client_; static std::once_flag init_flag_; DISABLE_COPY_AND_ASSIGN(RPCClient); diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index e73756d89004bc48339c0aa31dd0857c2ca6722d..57867aad4d679f75ea790b65b5773a73586fd96e 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -68,9 +68,7 @@ class RequestSend final : public RequestBase { method_id, &ctx_, request_.get(), &responder_, cq_, cq_, reinterpret_cast(static_cast(req_id))); } - virtual ~RequestSend() {} - std::string GetReqName() override { return request_->Varname(); } void Process() override { @@ -82,7 +80,6 @@ class RequestSend final : public RequestBase { framework::Variable* outvar = nullptr; request_handler_->Handle(varname, scope, invar, &outvar); - status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, reinterpret_cast(static_cast(req_id_))); @@ -125,7 +122,6 @@ class RequestGet final : public RequestBase { SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), &reply_); } - status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, reinterpret_cast(static_cast(req_id_))); @@ -170,10 +166,9 @@ class RequestPrefetch final : public RequestBase { SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), &reply_); - - status_ = FINISH; responder_.Finish(reply_, ::grpc::Status::OK, reinterpret_cast(static_cast(req_id_))); + status_ = FINISH; } protected: diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index d1fcbc414f123c5c4810d9cecf807a406aa2c405..e6ffc7066f24d5088a95801ed1c0670b24d5771f 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -71,8 +71,6 @@ class AsyncGRPCServer final : public RPCServer { std::unique_ptr<::grpc::Server> server_; // condition of the sub program - std::mutex barrier_mutex_; - mutable int barrier_cond_step_; std::condition_variable barrier_condition_; std::mutex mutex_ready_; diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index f97f638701cfb263f28dddbdc3bc80fb16468744..22a3a8135759c04b051d4ec2d2707e6752df2de2 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -113,10 +113,6 @@ void StartServer() { std::thread server_thread( std::bind(&detail::AsyncGRPCServer::StartServer, g_rpc_service.get())); - // FIXME(gongwb): don't use hard time. 
- sleep(10); - LOG(INFO) << "got nccl id and stop server..."; - g_rpc_service->ShutDown(); server_thread.join(); } @@ -127,7 +123,7 @@ TEST(PREFETCH, CPU) { std::thread server_thread(StartServer); g_rpc_service->WaitServerReady(); - detail::RPCClient client; + detail::RPCClient* client = detail::RPCClient::GetInstance(); int port = g_rpc_service->GetSelectedPort(); std::string ep = paddle::string::Sprintf("127.0.0.1:%d", port); @@ -141,8 +137,8 @@ TEST(PREFETCH, CPU) { std::string in_var_name("ids"); std::string out_var_name("out"); - client.AsyncPrefetchVariable(ep, ctx, scope, in_var_name, out_var_name); - client.Wait(); + client->AsyncPrefetchVariable(ep, ctx, scope, in_var_name, out_var_name); + client->Wait(); auto var = scope.Var(out_var_name); auto value = var->GetMutable()->value(); auto ptr = value.mutable_data(place); @@ -152,6 +148,7 @@ TEST(PREFETCH, CPU) { } } + g_rpc_service->ShutDown(); server_thread.join(); LOG(INFO) << "begin reset"; g_rpc_service.reset(nullptr); diff --git a/paddle/fluid/operators/detection/box_coder_op.cc b/paddle/fluid/operators/detection/box_coder_op.cc index 74848005d0bea6e5c818ff999727aa2b8ad51d84..8c4b4321b7582a5cfad89f23e3d298ed16162d99 100644 --- a/paddle/fluid/operators/detection/box_coder_op.cc +++ b/paddle/fluid/operators/detection/box_coder_op.cc @@ -22,21 +22,21 @@ class BoxCoderOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("PriorBox"), "Input(PriorBox) of BoxCoderOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("PriorBoxVar"), - "Input(PriorBoxVar) of BoxCoderOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("TargetBox"), "Input(TargetBox) of BoxCoderOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("OutputBox"), "Output(OutputBox) of BoxCoderOp should not be null."); auto prior_box_dims = ctx->GetInputDim("PriorBox"); - auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); auto target_box_dims = ctx->GetInputDim("TargetBox"); PADDLE_ENFORCE_EQ(prior_box_dims.size(), 2, "The rank of Input of PriorBoxVar must be 2"); PADDLE_ENFORCE_EQ(prior_box_dims[1], 4, "The shape of PriorBox is [N, 4]"); - PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + if (ctx->HasInput("PriorBoxVar")) { + auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); + PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + } auto code_type = GetBoxCodeType(ctx->Attrs().Get("code_type")); if (code_type == BoxCodeType::kEncodeCenterSize) { @@ -71,9 +71,11 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { "of the coordinate system. [xmax, ymax] is the right bottom " "coordinate of the anchor box."); AddInput("PriorBoxVar", - "(Tensor, default Tensor) " + "(Tensor, default Tensor, optional) " "PriorBoxVar is a 2-D Tensor with shape [M, 4] holds M group " - "of variance."); + "of variance. 
If not provided, all elements of PriorBoxVar are set to " + "1 by default.") + .AsDispensable(); AddInput( "TargetBox", "(LoDTensor or Tensor) This input can be a 2-D LoDTensor with shape " @@ -91,6 +93,10 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { "the code type used with the target box") .SetDefault("encode_center_size") .InEnum({"encode_center_size", "decode_center_size"}); + AddAttr("box_normalized", + "(bool, default true) " + "whether to treat the prior box as a normalized box") + .SetDefault(true); AddOutput("OutputBox", "(LoDTensor or Tensor) " "When code_type is 'encode_center_size', the output tensor of " @@ -127,5 +133,6 @@ width and height. namespace ops = paddle::operators; REGISTER_OPERATOR(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL(box_coder, ops::BoxCoderKernel, - ops::BoxCoderKernel); +REGISTER_OP_CPU_KERNEL( + box_coder, ops::BoxCoderKernel, + ops::BoxCoderKernel); diff --git a/paddle/fluid/operators/detection/box_coder_op.cu b/paddle/fluid/operators/detection/box_coder_op.cu index 8cef8e03439df4ca5b0fa94176a21a36f9eb9f70..a7af111f63d654319dd1d90d2032956951dfe49e 100644 --- a/paddle/fluid/operators/detection/box_coder_op.cu +++ b/paddle/fluid/operators/detection/box_coder_op.cu @@ -20,15 +20,16 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, const T* prior_box_var_data, const T* target_box_data, const int row, const int col, const int len, - T* output) { + const bool normalized, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < row * col) { const int row_idx = idx / col; const int col_idx = idx % col; - T prior_box_width = - prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len]; - T prior_box_height = - prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1]; + T prior_box_width = prior_box_data[col_idx * len + 2] - + prior_box_data[col_idx * len] + (normalized == false); + T prior_box_height = prior_box_data[col_idx * len + 3] - + prior_box_data[col_idx * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; T prior_box_center_y = (prior_box_data[col_idx * len + 3] + @@ -41,20 +42,24 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, T target_box_center_y = (target_box_data[row_idx * len + 3] + target_box_data[row_idx * len + 1]) / 2; - T target_box_width = - target_box_data[row_idx * len + 2] - target_box_data[row_idx * len]; - T target_box_height = - target_box_data[row_idx * len + 3] - target_box_data[row_idx * len + 1]; + T target_box_width = target_box_data[row_idx * len + 2] - + target_box_data[row_idx * len] + (normalized == false); + T target_box_height = target_box_data[row_idx * len + 3] - + target_box_data[row_idx * len + 1] + + (normalized == false); - output[idx * len] = (target_box_center_x - prior_box_center_x) / - prior_box_width / prior_box_var_data[col_idx * len]; - output[idx * len + 1] = (target_box_center_y - prior_box_center_y) / - prior_box_height / - prior_box_var_data[col_idx * len + 1]; - output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)) / - prior_box_var_data[col_idx * len + 2]; - output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)) / - prior_box_var_data[col_idx * len + 3]; + output[idx * len] = + (target_box_center_x - prior_box_center_x) / prior_box_width; + output[idx * len + 1] = + (target_box_center_y - prior_box_center_y) / prior_box_height; + output[idx * len +
2] = log(fabs(target_box_width / prior_box_width)); + output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)); + if (prior_box_var_data) { + output[idx * len] /= prior_box_var_data[col_idx * len]; + output[idx * len + 1] /= prior_box_var_data[col_idx * len + 1]; + output[idx * len + 2] /= prior_box_var_data[col_idx * len + 2]; + output[idx * len + 3] /= prior_box_var_data[col_idx * len + 3]; + } } } @@ -63,42 +68,56 @@ __global__ void DecodeCenterSizeKernel(const T* prior_box_data, const T* prior_box_var_data, const T* target_box_data, const int row, const int col, const int len, - T* output) { + const bool normalized, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < row * col) { const int col_idx = idx % col; - T prior_box_width = - prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len]; - T prior_box_height = - prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1]; + T prior_box_width = prior_box_data[col_idx * len + 2] - + prior_box_data[col_idx * len] + (normalized == false); + T prior_box_height = prior_box_data[col_idx * len + 3] - + prior_box_data[col_idx * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; T prior_box_center_y = (prior_box_data[col_idx * len + 3] + prior_box_data[col_idx * len + 1]) / 2; - - T target_box_width = exp(prior_box_var_data[col_idx * len + 2] * + T target_box_width, target_box_height; + T target_box_center_x, target_box_center_y; + if (prior_box_var_data) { + target_box_width = exp(prior_box_var_data[col_idx * len + 2] * target_box_data[idx * len + 2]) * prior_box_width; - T target_box_height = exp(prior_box_var_data[col_idx * len + 3] * + target_box_height = exp(prior_box_var_data[col_idx * len + 3] * target_box_data[idx * len + 3]) * prior_box_height; - T target_box_center_x = prior_box_var_data[col_idx * len] * + target_box_center_x = prior_box_var_data[col_idx * len] * target_box_data[idx * len] * prior_box_width + prior_box_center_x; - T target_box_center_y = prior_box_var_data[col_idx * len + 1] * + target_box_center_y = prior_box_var_data[col_idx * len + 1] * target_box_data[idx * len + 1] * prior_box_height + prior_box_center_y; + } else { + target_box_width = exp(target_box_data[idx * len + 2]) * prior_box_width; + target_box_height = + exp(target_box_data[idx * len + 3]) * prior_box_height; + target_box_center_x = + target_box_data[idx * len] * prior_box_width + prior_box_center_x; + target_box_center_y = target_box_data[idx * len + 1] * prior_box_height + + prior_box_center_y; + } output[idx * len] = target_box_center_x - target_box_width / 2; output[idx * len + 1] = target_box_center_y - target_box_height / 2; - output[idx * len + 2] = target_box_center_x + target_box_width / 2; - output[idx * len + 3] = target_box_center_y + target_box_height / 2; + output[idx * len + 2] = + target_box_center_x + target_box_width / 2 - (normalized == false); + output[idx * len + 3] = + target_box_center_y + target_box_height / 2 - (normalized == false); } } -template +template class BoxCoderCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -109,6 +128,11 @@ class BoxCoderCUDAKernel : public framework::OpKernel { auto* target_box = context.Input("TargetBox"); auto* output_box = context.Output("OutputBox"); + const T* prior_box_data = prior_box->data(); + const T* target_box_data = target_box->data(); + const T* prior_box_var_data = 
nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); + if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1, "Only support 1 level of LoD."); @@ -120,22 +144,19 @@ class BoxCoderCUDAKernel : public framework::OpKernel { int grid = (row * col + block - 1) / block; auto& device_ctx = context.cuda_device_context(); - const T* prior_box_data = prior_box->data(); - const T* prior_box_var_data = prior_box_var->data(); - const T* target_box_data = target_box->data(); - output_box->mutable_data({row, col, len}, context.GetPlace()); T* output = output_box->data(); auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - output); + normalized, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { DecodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - output); + normalized, output); } } }; @@ -144,5 +165,7 @@ class BoxCoderCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(box_coder, ops::BoxCoderCUDAKernel, - ops::BoxCoderCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + box_coder, + ops::BoxCoderCUDAKernel, + ops::BoxCoderCUDAKernel); diff --git a/paddle/fluid/operators/detection/box_coder_op.h b/paddle/fluid/operators/detection/box_coder_op.h index 77fc6c2b62af42e6526b889aeef2d9bab795baec..5ed8520acddfa8fe2105a7c1615bcb3243cb130f 100644 --- a/paddle/fluid/operators/detection/box_coder_op.h +++ b/paddle/fluid/operators/detection/box_coder_op.h @@ -28,26 +28,28 @@ inline BoxCodeType GetBoxCodeType(const std::string& type) { PADDLE_THROW("Not support type %s.", type); } -template +template class BoxCoderKernel : public framework::OpKernel { public: - void EncodeCenterSize(const framework::Tensor& target_box, - const framework::Tensor& prior_box, - const framework::Tensor& prior_box_var, - T* output) const { - int64_t row = target_box.dims()[0]; - int64_t col = prior_box.dims()[0]; - int64_t len = prior_box.dims()[1]; - auto* target_box_data = target_box.data(); - auto* prior_box_data = prior_box.data(); - auto* prior_box_var_data = prior_box_var.data(); + void EncodeCenterSize(const framework::Tensor* target_box, + const framework::Tensor* prior_box, + const framework::Tensor* prior_box_var, + const bool normalized, T* output) const { + int64_t row = target_box->dims()[0]; + int64_t col = prior_box->dims()[0]; + int64_t len = prior_box->dims()[1]; + auto* target_box_data = target_box->data(); + auto* prior_box_data = prior_box->data(); + const T* prior_box_var_data = nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { - T prior_box_width = - prior_box_data[j * len + 2] - prior_box_data[j * len]; - T prior_box_height = - prior_box_data[j * len + 3] - prior_box_data[j * len + 1]; + T prior_box_width = prior_box_data[j * len + 2] - + prior_box_data[j * len] + (normalized == false); + T prior_box_height = prior_box_data[j * len + 3] - + prior_box_data[j * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; T prior_box_center_y = @@ -57,67 +59,89 @@ class BoxCoderKernel : public framework::OpKernel { (target_box_data[i * len + 2] + target_box_data[i * len]) / 2; T target_box_center_y 
= (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2; - T target_box_width = - target_box_data[i * len + 2] - target_box_data[i * len]; - T target_box_height = - target_box_data[i * len + 3] - target_box_data[i * len + 1]; + T target_box_width = target_box_data[i * len + 2] - + target_box_data[i * len] + (normalized == false); + T target_box_height = target_box_data[i * len + 3] - + target_box_data[i * len + 1] + + (normalized == false); size_t offset = i * col * len + j * len; - output[offset] = (target_box_center_x - prior_box_center_x) / - prior_box_width / prior_box_var_data[j * len]; - output[offset + 1] = (target_box_center_y - prior_box_center_y) / - prior_box_height / prior_box_var_data[j * len + 1]; + output[offset] = + (target_box_center_x - prior_box_center_x) / prior_box_width; + output[offset + 1] = + (target_box_center_y - prior_box_center_y) / prior_box_height; output[offset + 2] = - std::log(std::fabs(target_box_width / prior_box_width)) / - prior_box_var_data[j * len + 2]; + std::log(std::fabs(target_box_width / prior_box_width)); output[offset + 3] = - std::log(std::fabs(target_box_height / prior_box_height)) / - prior_box_var_data[j * len + 3]; + std::log(std::fabs(target_box_height / prior_box_height)); + if (prior_box_var) { + output[offset] /= prior_box_var_data[j * len]; + output[offset + 1] /= prior_box_var_data[j * len + 1]; + output[offset + 2] /= prior_box_var_data[j * len + 2]; + output[offset + 3] /= prior_box_var_data[j * len + 3]; + } } } } - void DecodeCenterSize(const framework::Tensor& target_box, - const framework::Tensor& prior_box, - const framework::Tensor& prior_box_var, - T* output) const { - int64_t row = target_box.dims()[0]; - int64_t col = prior_box.dims()[0]; - int64_t len = prior_box.dims()[1]; - - auto* target_box_data = target_box.data(); - auto* prior_box_data = prior_box.data(); - auto* prior_box_var_data = prior_box_var.data(); + void DecodeCenterSize(const framework::Tensor* target_box, + const framework::Tensor* prior_box, + const framework::Tensor* prior_box_var, + const bool normalized, T* output) const { + int64_t row = target_box->dims()[0]; + int64_t col = prior_box->dims()[0]; + int64_t len = prior_box->dims()[1]; + + auto* target_box_data = target_box->data(); + auto* prior_box_data = prior_box->data(); + const T* prior_box_var_data = nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { size_t offset = i * col * len + j * len; - T prior_box_width = - prior_box_data[j * len + 2] - prior_box_data[j * len]; - T prior_box_height = - prior_box_data[j * len + 3] - prior_box_data[j * len + 1]; + T prior_box_width = prior_box_data[j * len + 2] - + prior_box_data[j * len] + (normalized == false); + T prior_box_height = prior_box_data[j * len + 3] - + prior_box_data[j * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; T prior_box_center_y = (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2; - T target_box_center_x = prior_box_var_data[j * len] * + T target_box_center_x = 0, target_box_center_y = 0; + T target_box_width = 0, target_box_height = 0; + if (prior_box_var) { + target_box_center_x = prior_box_var_data[j * len] * target_box_data[offset] * prior_box_width + prior_box_center_x; - T target_box_center_y = prior_box_var_data[j * len + 1] * + target_box_center_y = prior_box_var_data[j * len + 1] * target_box_data[offset + 1] * prior_box_height + 
prior_box_center_y; - T target_box_width = std::exp(prior_box_var_data[j * len + 2] * + target_box_width = std::exp(prior_box_var_data[j * len + 2] * target_box_data[offset + 2]) * prior_box_width; - T target_box_height = std::exp(prior_box_var_data[j * len + 3] * + target_box_height = std::exp(prior_box_var_data[j * len + 3] * target_box_data[offset + 3]) * prior_box_height; + } else { + target_box_center_x = + target_box_data[offset] * prior_box_width + prior_box_center_x; + target_box_center_y = target_box_data[offset + 1] * prior_box_height + + prior_box_center_y; + target_box_width = + std::exp(target_box_data[offset + 2]) * prior_box_width; + target_box_height = + std::exp(target_box_data[offset + 3]) * prior_box_height; + } output[offset] = target_box_center_x - target_box_width / 2; output[offset + 1] = target_box_center_y - target_box_height / 2; - output[offset + 2] = target_box_center_x + target_box_width / 2; - output[offset + 3] = target_box_center_y + target_box_height / 2; + output[offset + 2] = + target_box_center_x + target_box_width / 2 - (normalized == false); + output[offset + 3] = + target_box_center_y + target_box_height / 2 - (normalized == false); } } } @@ -139,11 +163,14 @@ class BoxCoderKernel : public framework::OpKernel { output_box->mutable_data({row, col, len}, context.GetPlace()); auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); T* output = output_box->data(); if (code_type == BoxCodeType::kEncodeCenterSize) { - EncodeCenterSize(*target_box, *prior_box, *prior_box_var, output); + EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, + output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { - DecodeCenterSize(*target_box, *prior_box, *prior_box_var, output); + DecodeCenterSize(target_box, prior_box, prior_box_var, normalized, + output); } } }; diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/fetch_barrier_op.cc index 79ec02f52094121d01c6bda2a5d99d2211893e89..1e2c93335fb9cc6b231857783743eda4e387bf39 100644 --- a/paddle/fluid/operators/fetch_barrier_op.cc +++ b/paddle/fluid/operators/fetch_barrier_op.cc @@ -45,13 +45,13 @@ class FetchBarrierOp : public framework::OperatorBase { auto rpc_client = detail::RPCClient::GetInstance(); - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); for (auto& ep : eps) { VLOG(3) << "fetch barrier, ep: " << ep; rpc_client->AsyncSendFetchBarrier(ep); } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } }; diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index e21b57258928856a10d6e86c3e2c6e81fb241ee3..aa3e05b83b23569a4dd9c83294916e289f993abc 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -33,7 +33,6 @@ class GatherOp : public framework::OperatorWithKernel { auto index_dims = ctx->GetInputDim("Index"); PADDLE_ENFORCE(index_dims.size() == 1); int batch_size = ctx->GetInputDim("Index")[0]; - PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx->GetInputDim("X")); output_dims[0] = batch_size; ctx->SetOutputDim("Out", output_dims); diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 71e75c25321812c849e205460217b174d80654be..66a0f87b46c6447bac7e42f0f61e3170cb1f2fdb 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -222,8 +222,8 @@ static void 
FillRequestCtx(detail::RequestHandler *h, framework::Scope *scope, h->SetDevCtx(dev_ctx); h->SetExecutor(executor); h->SetProgram(program); - h->SetPrefetchPreparedCtx(std::move( - std::unique_ptr(prefetch_ctx))); + h->SetPrefetchPreparedCtx( + std::unique_ptr(prefetch_ctx)); h->SetRPCServer(rpc_server); } diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index d60a99994edc926456706ff6a3ba998a3e5e7dd5..be55bc43b14f1e6211f71b4080d1676838ad508c 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -135,7 +135,11 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { PoolingMode pooling_mode; if (pooling_type == "max") { - pooling_mode = PoolingMode::kMaximum; + if (FLAGS_cudnn_deterministic) { + pooling_mode = PoolingMode::kMaximumDeterministic; + } else { + pooling_mode = PoolingMode::kMaximum; + } } else { pooling_mode = PoolingMode::kAverage; } diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc index e0a9b24ac8978418a1a4ece62286e022bec8b834..167a06e090c1d5a15f502098e5fe4968693bcc04 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/prefetch_op.cc @@ -53,7 +53,7 @@ class PrefetchOp : public framework::OperatorBase { VLOG(3) << "don't send no-initialied variable: " << ins[i]; } } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } }; diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 8c0dac65dd691954b112bfa61622d399b2b9c3e5..31e5d81e55ed9703eb3a9ef2595fa2a280f1a734 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -26,7 +26,11 @@ class MultiFileReader : public framework::ReaderBase { MultiFileReader(const std::vector& file_names, const std::vector& dims, size_t thread_num, size_t buffer_size) - : file_names_(file_names), dims_(dims), buffer_size_(buffer_size) { + : buffer_size_(buffer_size) { + readers_.reserve(file_names.size()); + for (const std::string& f_name : file_names) { + readers_.emplace_back(CreateReaderByFileName(f_name, dims)); + } prefetchers_.resize(thread_num); StartNewScheduler(); } @@ -40,14 +44,13 @@ class MultiFileReader : public framework::ReaderBase { void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); - void PrefetchThreadFunc(std::string file_name, size_t thread_idx); + void PrefetchThreadFunc(size_t reader_idx, size_t thread_idx); - std::vector file_names_; - std::vector dims_; + std::vector> readers_; std::thread scheduler_; std::vector prefetchers_; size_t buffer_size_; - reader::BlockingQueue* waiting_file_idx_; + reader::BlockingQueue* waiting_reader_idx_; reader::BlockingQueue* available_thread_idx_; reader::BlockingQueue>* buffer_; }; @@ -65,15 +68,15 @@ void MultiFileReader::ReInit() { void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); - waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); + waiting_reader_idx_ = new reader::BlockingQueue(readers_.size()); available_thread_idx_ = new reader::BlockingQueue(thread_num); buffer_ = new reader::BlockingQueue>( buffer_size_); - for (size_t i = 0; i < file_names_.size(); ++i) { - waiting_file_idx_->Send(i); + for (size_t i = 0; i < readers_.size(); ++i) { + waiting_reader_idx_->Send(i); } - waiting_file_idx_->Close(); + waiting_reader_idx_->Close(); for (size_t i = 0; i < thread_num; ++i) { available_thread_idx_->Send(i); } @@ 
-84,13 +87,13 @@ void MultiFileReader::StartNewScheduler() { void MultiFileReader::EndScheduler() { available_thread_idx_->Close(); buffer_->Close(); - waiting_file_idx_->Close(); + waiting_reader_idx_->Close(); if (scheduler_.joinable()) { scheduler_.join(); } delete buffer_; delete available_thread_idx_; - delete waiting_file_idx_; + delete waiting_reader_idx_; } void MultiFileReader::ScheduleThreadFunc() { @@ -102,12 +105,11 @@ void MultiFileReader::ScheduleThreadFunc() { if (prefetcher.joinable()) { prefetcher.join(); } - size_t file_idx; - if (waiting_file_idx_->Receive(&file_idx)) { + size_t reader_idx; + if (waiting_reader_idx_->Receive(&reader_idx)) { // Still have files to read. Start a new prefetch thread. - std::string file_name = file_names_[file_idx]; - prefetcher = std::thread([this, file_name, thread_idx] { - PrefetchThreadFunc(file_name, thread_idx); + prefetcher = std::thread([this, reader_idx, thread_idx] { + PrefetchThreadFunc(reader_idx, thread_idx); }); } else { // No more file to read. @@ -129,23 +131,22 @@ void MultiFileReader::ScheduleThreadFunc() { VLOG(5) << "MultiFileReader schedule thread terminates."; } -void MultiFileReader::PrefetchThreadFunc(std::string file_name, - size_t thread_idx) { - VLOG(5) << "The prefetch thread of file '" << file_name << "' starts."; - std::unique_ptr reader = - CreateReaderByFileName(file_name, dims_); +void MultiFileReader::PrefetchThreadFunc(size_t reader_idx, size_t thread_idx) { + VLOG(5) << "The prefetch thread of file idx '" << reader_idx << "' starts."; + std::unique_ptr& reader = readers_[reader_idx]; while (true) { std::vector ins; reader->ReadNext(&ins); if (ins.empty()) { + reader->ReInit(); break; } try { buffer_->Send(std::move(ins)); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " - "thread of file '" - << file_name << "' will terminate."; + "thread of file idx '" + << reader_idx << "' will terminate."; break; } } @@ -154,7 +155,8 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " "Fail to send thread_idx."; } - VLOG(5) << "The prefetch thread of file '" << file_name << "' terminates."; + VLOG(5) << "The prefetch thread of file idx '" << reader_idx + << "' terminates."; } class OpenFilesOp : public framework::OperatorBase { diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index d8ddb7b448910b5e0e6e71742eb2fdc6a225c919..49b480948a788dc22f95a4eafc6f780298d7c5f9 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -51,7 +51,7 @@ class RecvOp : public framework::OperatorBase { rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } if (sync_mode) { - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } } }; diff --git a/paddle/fluid/operators/reduce_op.h b/paddle/fluid/operators/reduce_op.h index cd19cc1460a6b4d4201f21f6f27f988c1547b88a..7df47f316c30b9eb2644677681b91023e1838548 100644 --- a/paddle/fluid/operators/reduce_op.h +++ b/paddle/fluid/operators/reduce_op.h @@ -135,15 +135,16 @@ class ReduceKernel : public framework::OpKernel { } else { int ndim = context.Input("X")->dims().size(); int rdim = context.Attr>("dim").size(); - HANDLE_DIM(6, 5); - HANDLE_DIM(6, 4); - HANDLE_DIM(6, 3); - HANDLE_DIM(6, 2); - HANDLE_DIM(6, 1); - HANDLE_DIM(5, 4); - HANDLE_DIM(5, 3); - HANDLE_DIM(5, 2); - HANDLE_DIM(5, 1); + // comments for accelerating compiling temporarily. 
+ // HANDLE_DIM(6, 5); + // HANDLE_DIM(6, 4); + // HANDLE_DIM(6, 3); + // HANDLE_DIM(6, 2); + // HANDLE_DIM(6, 1); + // HANDLE_DIM(5, 4); + // HANDLE_DIM(5, 3); + // HANDLE_DIM(5, 2); + // HANDLE_DIM(5, 1); HANDLE_DIM(4, 3); HANDLE_DIM(4, 2); HANDLE_DIM(4, 1); diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index bcd8e81609a37cc544f5a5cc4188400c1632a668..2bc38ff4e3e6ee32bb2b0dbf4daa6d871dbaebfd 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -49,13 +49,13 @@ class SendBarrierOp : public framework::OperatorBase { VLOG(3) << "SendBarrierOp sync_mode:" << sync_mode; // need to wait before sending send_barrier message - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); if (sync_mode) { for (auto& ep : eps) { VLOG(3) << "send barrier, ep: " << ep; rpc_client->AsyncSendBatchBarrier(ep); } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } } }; diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index a5150f242ca3b0befafa2443f0bc466e2aea85e4..a91b1453896f951be58797071d9a5928633ccdcf 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -59,14 +59,14 @@ class SendOp : public framework::OperatorBase { VLOG(3) << "don't send no-initialied variable: " << ins[i]; } } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); if (sync_mode) { for (auto& ep : endpoints) { VLOG(3) << "batch barrier, ep: " << ep; rpc_client->AsyncSendBatchBarrier(ep); } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } if (outs.size() > 0) { @@ -74,13 +74,13 @@ class SendOp : public framework::OperatorBase { VLOG(2) << "getting " << outs[i] << " from " << epmap[i]; rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]); } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); // tell pservers that current trainer have called fetch for (auto& ep : endpoints) { VLOG(2) << "send fetch barrier, ep: " << ep; rpc_client->AsyncSendFetchBarrier(ep); } - PADDLE_ENFORCE(rpc_client->Wait()); + rpc_client->Wait(); } } }; diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index f9e0596191d0b86686e0fa36265806111c774b38..2685ce217ee0f0d3e89f3751e96218dcd19bead4 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -114,7 +114,7 @@ class SGDOpKernel : public framework::OpKernel { int64_t id_index = param.Index(grad.rows()[i]); PADDLE_ENFORCE_GE(id_index, static_cast(0), "id should be in the table"); - for (size_t j = 0; j < grad_row_width; j++) { + for (int64_t j = 0; j < grad_row_width; j++) { out_data[id_index * grad_row_width + j] -= lr[0] * grad_data[i * grad_row_width + j]; } diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c75fce7959d1af51afd52af23fe657d10a2f3988 --- /dev/null +++ b/paddle/fluid/operators/shape_op.cc @@ -0,0 +1,54 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/shape_op.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +class ShapeOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input (Input) of get_shape op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output (Out) of get_shape op should not be null."); + auto in_dim = ctx->GetInputDim("Input"); + ctx->SetOutputDim("Out", {in_dim.size()}); + } +}; + +class ShapeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Input", "(Tensor), The input tensor."); + AddOutput("Out", "(Tensor), The shape of input tensor."); + AddComment(R"DOC( +Shape Operator. +Get the shape of input tensor. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(shape, ops::ShapeOp, ops::ShapeOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(shape, ops::ShapeKernel, ops::ShapeKernel, + ops::ShapeKernel, ops::ShapeKernel); diff --git a/paddle/fluid/operators/shape_op.cu b/paddle/fluid/operators/shape_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..7736a2a1e13cfa5d445411b3efac7669a7bf23a2 --- /dev/null +++ b/paddle/fluid/operators/shape_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/shape_op.h" + +REGISTER_OP_CUDA_KERNEL(shape, paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel); diff --git a/paddle/fluid/operators/shape_op.h b/paddle/fluid/operators/shape_op.h new file mode 100644 index 0000000000000000000000000000000000000000..3be86b66a538e7b38a5d59095fee7e7636364bce --- /dev/null +++ b/paddle/fluid/operators/shape_op.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class ShapeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_t = ctx.Input("Input"); + auto* out_t = ctx.Output("Out"); + auto out_data = out_t->mutable_data(platform::CPUPlace()); + auto in_dims = in_t->dims(); + for (int i = 0; i < in_dims.size(); ++i) { + out_data[i] = in_dims[i]; + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc index 83e768b4dc9c607b0f73d7183462d772ae7ab994..855157e7c4c5c4a43091d28d3a5414e6e386b727 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt_engine_op.cc @@ -31,8 +31,9 @@ void paddle::operators::TensorRTEngineKernel::Prepare( auto max_workspace = context.Attr("max_workspace"); engine_.reset(new inference::tensorrt::TensorRTEngine( max_batch_, max_workspace, nullptr)); + // TODO(Superjomn) parameters should be passed after analysised from outside. inference::Singleton::Global().ConvertBlock( - block, engine_.get()); + block, {}, context.scope(), engine_.get()); engine_->FreezeNetwork(); } diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/test_send_nccl_id.cc index a845ba2eb038fa6a8e70dfbac06c31c19dbb9e3e..eb01ac9b9072b1bbd4115d60a2101d2f1cbcf93a 100644 --- a/paddle/fluid/operators/test_send_nccl_id.cc +++ b/paddle/fluid/operators/test_send_nccl_id.cc @@ -61,7 +61,6 @@ void StartServer() { std::bind(&detail::AsyncGRPCServer::StartServer, g_rpc_service.get())); g_rpc_service->SetCond(detail::kRequestSend); - std::cout << "before WaitFanInOfSend" << std::endl; g_rpc_service->WaitBarrier(detail::kRequestSend); LOG(INFO) << "got nccl id and stop server..."; @@ -88,12 +87,12 @@ TEST(SendNcclId, GrpcServer) { int port = g_rpc_service->GetSelectedPort(); std::string ep = string::Sprintf("127.0.0.1:%d", port); - detail::RPCClient client; - LOG(INFO) << "connect to server" << ep; - client.AsyncSendVariable(ep, dev_ctx, scope, NCCL_ID_VARNAME); - client.Wait(); - client.AsyncSendBatchBarrier(ep); - client.Wait(); + detail::RPCClient* client = detail::RPCClient::GetInstance(); + LOG(INFO) << "connect to server " << ep; + client->AsyncSendVariable(ep, dev_ctx, scope, NCCL_ID_VARNAME); + client->Wait(); + client->AsyncSendBatchBarrier(ep); + client->Wait(); server_thread.join(); g_rpc_service.reset(nullptr); diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index c0d399d078f73743836fc2a0c1d4b1e6b31ecd83..0f4a7c8485b21e36dac46c5a87c2445275a3195e 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -22,6 +22,8 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/macros.h" +DECLARE_bool(cudnn_deterministic); + namespace paddle { namespace platform { @@ -76,8 +78,22 @@ enum class DataLayout { // Not use enum class PoolingMode { kMaximum, kAverage, + kMaximumDeterministic, }; +inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { + switch (mode) { + case PoolingMode::kMaximumDeterministic: + return CUDNN_POOLING_MAX_DETERMINISTIC; + case PoolingMode::kAverage: + return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kMaximum: + return CUDNN_POOLING_MAX; + default: + PADDLE_THROW("Unexpected pooling mode."); + } +} + template class CudnnDataType; @@ -293,9 +309,7 @@ class ScopedPoolingDescriptor { PADDLE_ENFORCE_EQ(kernel.size(), pads.size()); PADDLE_ENFORCE_EQ(kernel.size(), strides.size()); PADDLE_ENFORCE(dynload::cudnnSetPoolingNdDescriptor( - desc_, (mode == PoolingMode::kMaximum - ? CUDNN_POOLING_MAX - : CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING), + desc_, (GetPoolingMode(mode)), CUDNN_PROPAGATE_NAN, // Always propagate nans. kernel.size(), kernel.data(), pads.data(), strides.data())); return desc_; diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 1f733d71bdfb777d4a2f316a5fefc3c874879862..6c50ab2685c56bafe146c67fe2ef081ee4c55628 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -175,7 +175,6 @@ CUDADeviceContext::~CUDADeviceContext() { Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { - std::lock_guard guard(mutex_); PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index a9c1984616bc731e0557f2cb89282423aa9c3bac..6b82d93237b6baa20703c5b54b56f5381dd858df 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -100,7 +100,6 @@ class CUDADeviceContext : public DeviceContext { template void RecordEvent(cudaEvent_t ev, Callback callback) { - std::lock_guard guard(mutex_); callback(); PADDLE_ENFORCE(cudaEventRecord(ev, stream_)); } @@ -110,8 +109,6 @@ class CUDADeviceContext : public DeviceContext { std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; - - mutable std::recursive_mutex mutex_; cudaStream_t stream_; cudnnHandle_t cudnn_handle_; cublasHandle_t cublas_handle_; diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 81acaff87d3c2025cf0d6185a1590b018bfbd83c..25bcda7eedc1ef42f75fb8fd1439f0c8f55015c3 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -45,7 +45,7 @@ extern void *cublas_dso_handle; std::call_once(cublas_dso_flag, []() { \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ }); \ - void *p_##__name = dlsym(cublas_dso_handle, #__name); \ + static void *p_##__name = dlsym(cublas_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 34d83e395694f55eafca74d63ebf363169ab30e8..77e46fa768b62c277d7b4027de7173e39a5672b4 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -39,7 +39,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ }); \ 
EnforceCUDNNLoaded(#__name); \ - void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ + static void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index e64de7c20fc9d145e51cfc4528e321b3c4ec86c8..2ad52bc7d328f1d05b1bf1dcd4bb39a7c67b8179 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -45,7 +45,7 @@ extern void *cupti_dso_handle; std::call_once(cupti_dso_flag, []() { \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ }); \ - void *p_##__name = dlsym(cupti_dso_handle, #__name); \ + static void *p_##__name = dlsym(cupti_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 46ad4379d5f9572d415ef1d747077217ae29391e..5b9e0820e0b319fe7a636a57a0029caf038b4db3 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -34,7 +34,7 @@ extern void *curand_dso_handle; std::call_once(curand_dso_flag, []() { \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ }); \ - void *p_##__name = dlsym(curand_dso_handle, #__name); \ + static void *p_##__name = dlsym(curand_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index 37902ae20c5d9d64486232bbd468375c4a50a615..575516f81870fc9f7b92919ffc20a201cb5cbce8 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -37,7 +37,7 @@ extern void* nccl_dso_handle; std::call_once(nccl_dso_flag, []() { \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ }); \ - void* p_##__name = dlsym(nccl_dso_handle, #__name); \ + static void* p_##__name = dlsym(nccl_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/tensorrt.h b/paddle/fluid/platform/dynload/tensorrt.h index f584a49da0fefe0b064b5fb55b01ec132225ce5e..5d67658b94af75680a100e13eed7b6b052162e00 100644 --- a/paddle/fluid/platform/dynload/tensorrt.h +++ b/paddle/fluid/platform/dynload/tensorrt.h @@ -40,7 +40,7 @@ extern void* tensorrt_dso_handle; paddle::platform::dynload::GetTensorRtDsoHandle(); \ PADDLE_ENFORCE(tensorrt_dso_handle, "load tensorrt so failed"); \ }); \ - void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ + static void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ PADDLE_ENFORCE(p_##__name, "load %s failed", #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index 7c70649d21c547beb824576d4a8ecf6219a9bddf..d157c1fda789b98f06ad069d2a9c4f421ff82dcd 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -40,7 +40,7 @@ extern void* warpctc_dso_handle; std::call_once(warpctc_dso_flag, []() { \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ }); \ - void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ + static void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ return reinterpret_cast(p_##_name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 
3d8d64e4c2758675067834810ebb9aee1e88fdb9..01de9d7041bf3eb40884e2a6295027cccfaebd2a 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -127,6 +127,7 @@ double Event::CpuElapsedMs(const Event& e) const { double Event::CudaElapsedMs(const Event& e) const { #ifdef PADDLE_WITH_CUDA + if (!has_cuda_) return 0.0; PADDLE_ENFORCE(e.has_cuda() && has_cuda()); PADDLE_ENFORCE(e.device() == device()); PADDLE_ENFORCE(cudaEventSynchronize(event_)); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 3af8941be69fe507bc105e26b608ec768e4b5998..03cf417b62f96fd6812b3eac497ffdf9a484f5eb 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -519,6 +519,14 @@ All parameter, weight, gradient are variables in Paddle. [](const ExecutionStrategy &self) { return self.allow_op_delay_; }, [](ExecutionStrategy &self, bool allow_op_delay) { self.allow_op_delay_ = allow_op_delay; + }) + .def_property( + "num_iteration_per_drop_scope", + [](const ExecutionStrategy &self) { + return self.num_iteration_per_drop_scope_; + }, + [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) { + self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope; }); py::class_ build_strategy(pe, "BuildStrategy"); diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index 82d9aa601cf450b8f90573d6c582bb12ced7a48a..6c65d9160c059ac143ee258b2bdaed5915a1dca1 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -119,40 +119,56 @@ bool Chunk::Write(std::ostream& os, Compressor ct) const { } bool Chunk::Parse(std::istream& sin) { - Header hdr; - bool ok = hdr.Parse(sin); + ChunkParser parser(sin); + if (!parser.Init()) { + return false; + } + Clear(); + while (parser.HasNext()) { + Add(parser.Next()); + } + return true; +} + +ChunkParser::ChunkParser(std::istream& sin) : in_(sin) {} +bool ChunkParser::Init() { + pos_ = 0; + bool ok = header_.Parse(in_); if (!ok) { return ok; } - auto beg_pos = sin.tellg(); - uint32_t crc = Crc32Stream(sin, hdr.CompressSize()); - PADDLE_ENFORCE_EQ(hdr.Checksum(), crc); - Clear(); - sin.seekg(beg_pos, sin.beg); - std::unique_ptr compressed_stream; - switch (hdr.CompressType()) { + auto beg_pos = in_.tellg(); + uint32_t crc = Crc32Stream(in_, header_.CompressSize()); + PADDLE_ENFORCE_EQ(header_.Checksum(), crc); + in_.seekg(beg_pos, in_.beg); + + switch (header_.CompressType()) { case Compressor::kNoCompress: break; case Compressor::kSnappy: - compressed_stream.reset(new snappy::iSnappyStream(sin)); + compressed_stream_.reset(new snappy::iSnappyStream(in_)); break; default: PADDLE_THROW("Not implemented"); } + return true; +} - std::istream& stream = compressed_stream ? *compressed_stream : sin; +bool ChunkParser::HasNext() const { return pos_ < header_.NumRecords(); } - for (uint32_t i = 0; i < hdr.NumRecords(); ++i) { - uint32_t rec_len; - stream.read(reinterpret_cast(&rec_len), sizeof(uint32_t)); - std::string buf; - buf.resize(rec_len); - stream.read(&buf[0], rec_len); - PADDLE_ENFORCE_EQ(rec_len, stream.gcount()); - Add(buf); +std::string ChunkParser::Next() { + if (!HasNext()) { + return ""; } - return true; + ++pos_; + std::istream& stream = compressed_stream_ ? 
*compressed_stream_ : in_; + uint32_t rec_len; + stream.read(reinterpret_cast(&rec_len), sizeof(uint32_t)); + std::string buf; + buf.resize(rec_len); + stream.read(&buf[0], rec_len); + PADDLE_ENFORCE_EQ(rec_len, stream.gcount()); + return buf; } - } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/chunk.h b/paddle/fluid/recordio/chunk.h index 71a1556a33bfa5c937d6a799d2818cd5a5ef2094..cfb954a591679c2d2c4f42ecd99ca0c8bd1084cf 100644 --- a/paddle/fluid/recordio/chunk.h +++ b/paddle/fluid/recordio/chunk.h @@ -13,6 +13,7 @@ // limitations under the License. #pragma once +#include #include #include @@ -53,9 +54,20 @@ class Chunk { DISABLE_COPY_AND_ASSIGN(Chunk); }; -size_t CompressData(const char* in, size_t in_length, Compressor ct, char* out); +class ChunkParser { + public: + explicit ChunkParser(std::istream& sin); + + bool Init(); + std::string Next(); + bool HasNext() const; -void DeflateData(const char* in, size_t in_length, Compressor ct, char* out); + private: + Header header_; + uint32_t pos_{0}; + std::istream& in_; + std::unique_ptr compressed_stream_; +}; } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/scanner.cc b/paddle/fluid/recordio/scanner.cc index 88b4d4001bc1b6dc935a9aabc2db5edfb55a60e4..06a13e6c5b6ea76456e231e3f7b1eb33492b16ea 100644 --- a/paddle/fluid/recordio/scanner.cc +++ b/paddle/fluid/recordio/scanner.cc @@ -22,35 +22,33 @@ namespace paddle { namespace recordio { Scanner::Scanner(std::unique_ptr &&stream) - : stream_(std::move(stream)) { + : stream_(std::move(stream)), parser_(*stream_) { Reset(); } -Scanner::Scanner(const std::string &filename) { - stream_.reset(new std::ifstream(filename)); +Scanner::Scanner(const std::string &filename) + : stream_(new std::ifstream(filename)), parser_(*stream_) { Reset(); } void Scanner::Reset() { stream_->clear(); stream_->seekg(0, std::ios::beg); - ParseNextChunk(); + parser_.Init(); } std::string Scanner::Next() { - PADDLE_ENFORCE(!eof_, "StopIteration"); - auto rec = cur_chunk_.Record(offset_++); - if (offset_ == cur_chunk_.NumRecords()) { - ParseNextChunk(); + if (stream_->eof()) { + return ""; } - return rec; -} -void Scanner::ParseNextChunk() { - eof_ = !cur_chunk_.Parse(*stream_); - offset_ = 0; + auto res = parser_.Next(); + if (!parser_.HasNext() && HasNext()) { + parser_.Init(); + } + return res; } -bool Scanner::HasNext() const { return !eof_; } +bool Scanner::HasNext() const { return !stream_->eof(); } } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/scanner.h b/paddle/fluid/recordio/scanner.h index 34f1b0c78d6b5af6072a993579e1866d38c6d009..0d885dd87a2f819ba1d9f76259196f6cfff0b2a0 100644 --- a/paddle/fluid/recordio/scanner.h +++ b/paddle/fluid/recordio/scanner.h @@ -37,11 +37,7 @@ class Scanner { private: std::unique_ptr stream_; - Chunk cur_chunk_; - size_t offset_; - bool eof_; - - void ParseNextChunk(); + ChunkParser parser_; }; } // namespace recordio } // namespace paddle diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 8eeea1805d8610f6f27f422337f3526688b73de3..113d02ce4865877d9385da31d996c0985c348716 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -145,19 +145,17 @@ function check_style() { trap 'abort' 0 set -e - # install glide - curl https://glide.sh/get | bash - eval "$(GIMME_GO_VERSION=1.8.3 gimme)" + if [ -x "$(command -v gimme)" ]; then + eval "$(GIMME_GO_VERSION=1.8.3 gimme)" + fi # set up go environment for running gometalinter mkdir -p 
$GOPATH/src/github.com/PaddlePaddle/ ln -sf ${PADDLE_ROOT} $GOPATH/src/github.com/PaddlePaddle/Paddle - cd $GOPATH/src/github.com/PaddlePaddle/Paddle/go; glide install; cd - - - go get github.com/alecthomas/gometalinter - gometalinter --install + mkdir -p ./build/go + cp go/glide.* build/go + cd build/go; glide install; cd - - cd ${PADDLE_ROOT} export PATH=/usr/bin:$PATH pre-commit install clang-format --version diff --git a/python/paddle/batch.py b/python/paddle/batch.py index 317cf037c69f8639e3760fbfce20565127794fcb..d48c54fcbb66487617b1946bc69724870c8f879c 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -15,7 +15,7 @@ __all__ = ['batch'] -def batch(reader, batch_size): +def batch(reader, batch_size, drop_last=False): """ Create a batched reader. @@ -23,6 +23,8 @@ def batch(reader, batch_size): :type reader: callable :param batch_size: size of each mini-batch :type batch_size: int + :param drop_last: drop the last batch, if the size of last batch is not equal to batch_size. + :type drop_last: bool :return: the batched reader. :rtype: callable """ @@ -35,7 +37,7 @@ def batch(reader, batch_size): if len(b) == batch_size: yield b b = [] - if b: + if drop_last == False and len(b) != 0: yield b return batch_reader diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index aece8fc1490994425153126333e2826ff5533637..68aee304a6761e97a0dab4183611d9d07152da16 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -45,8 +45,8 @@ import transpiler from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace -from transpiler import DistributeTranspiler, SimpleDistributeTranspiler, \ - InferenceTranspiler, memory_optimize, release_memory +from transpiler import DistributeTranspiler, InferenceTranspiler, \ + memory_optimize, release_memory from concurrency import (Go, make_channel, channel_send, channel_recv, channel_close, Select) from lod_tensor import create_lod_tensor, create_random_int_lodtensor @@ -121,7 +121,7 @@ def __bootstrap__(): ] if core.is_compiled_with_cuda(): read_env_flags += [ - 'fraction_of_gpu_memory_to_use', 'cudnn_algo_use_autotune' + 'fraction_of_gpu_memory_to_use', 'cudnn_deterministic' ] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index cb87653c47727052bb7835dcbe0a590887a560c1..56f6f26803919a171f6459c909e6bb71ab63b180 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -81,7 +81,10 @@ __all__ = [ 'label_smooth', 'roi_pool', 'dice_loss', - 'upsampling_bilinear2d', + 'image_resize', + 'image_resize_short', + 'resize_bilinear', + 'gather', 'random_crop', ] @@ -3889,7 +3892,6 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): def dice_loss(input, label, epsilon=0.00001): """ - **Dice loss Layer** Dice loss for comparing the similarity of two batch of data, usually is used for binary image segmentation i.e. labels are binary. The dice loss can be defined as below equation: @@ -3929,30 +3931,35 @@ def dice_loss(input, label, epsilon=0.00001): return reduce_mean(dice_score) -def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): +def image_resize(input, + out_shape=None, + scale=None, + name=None, + resample='BILINEAR'): """ - The mathematical meaning of upsampling_bilinear2d is also called - Bilinear interpolation. 
- Bilinear interpolation is an extension of linear interpolation for - interpolating functions of two variables (e.g. H-direction and - W-direction in this layer) on a rectilinear 2D grid. + Resize a batch of images. - For details, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Bilinear_interpolation + The input must be a tensor of the shape (num_batches, channels, in_h, in_w), + and the resizing only applies on the last two dimensions(hight and width). + + Supporting resample methods: + 'BILINEAR' : Bilinear interpolation Args: - input (Variable): The input tensor of bilinear interpolation, + input (Variable): The input tensor of image resize layer, This is a 4-D tensor of the shape (num_batches, channels, in_h, in_w). - out_shape(list|tuple|Variable|None): Output shape of bilinear interpolation + out_shape(list|tuple|Variable|None): Output shape of image resize layer, the shape is (out_h, out_w). Default: None - scale(int|None): The multiplier for the input height or width. + scale(float|None): The multiplier for the input height or width. At least one of out_shape or scale must be set. And out_shape has a higher priority than scale. Default: None name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. + resample(str): The resample method. It can only be 'BILINEAR' currently. + Default: 'BILINEAR' Returns: out (Variable): The output is a 4-D tensor of the shape @@ -3961,8 +3968,12 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): Examples: .. code-block:: python - out = fluid.layers.bilinear_interp(input, out_shape=[12, 12]) + out = fluid.layers.image_resize(input, out_shape=[12, 12]) """ + resample_methods = {'BILINEAR': 'bilinear_interp'} + if resample not in resample_methods: + raise ValueError( + "The 'resample' of image_resize can only be 'BILINEAR' currently.") if out_shape is None and scale is None: raise ValueError("One of out_shape and scale must not be None") helper = LayerHelper('bilinear_interp', **locals()) @@ -3975,10 +3986,9 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): out_w = 0 inputs = {"X": input} if out_shape is not None: - if not (_is_list_or_turple_(out_shape) and len(out_shape) == 2) and ( - out_shape is not Variable): - raise ValueError('out_shape should be a list or tuple ', - 'with length 2, (out_h, out_w).') + if not (_is_list_or_turple_(out_shape) and + len(out_shape) == 2) and not isinstance(out_shape, Variable): + raise ValueError('out_shape should be a list or tuple or variable') if _is_list_or_turple_(out_shape): out_shape = list(map(int, out_shape)) out_h = out_shape[0] @@ -3991,7 +4001,7 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): out = helper.create_tmp_variable(dtype) helper.append_op( - type="bilinear_interp", + type=resample_methods[resample], inputs=inputs, outputs={"Out": out}, attrs={"out_h": out_h, @@ -3999,6 +4009,104 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None): return out +def resize_bilinear(input, out_shape=None, scale=None, name=None): + """ + This is an alias of layer 'image_resize' with bilinear interpolation. + + The mathematical meaning of resize bilinear layer is + Bilinear interpolation. + Bilinear interpolation is an extension of linear interpolation for + interpolating functions of two variables (e.g. H-direction and + W-direction in this layer) on a rectilinear 2D grid. 
+ + For details, please refer to Wikipedia: + https://en.wikipedia.org/wiki/Bilinear_interpolation + """ + + return image_resize(input, out_shape, scale, name, 'BILINEAR') + + +def image_resize_short(input, out_short_len, resample='BILINEAR'): + """ + Resize a batch of images. The short edge of input images will be + resized to the given 'out_short_len'. The long edge of input images + will be resized proportionately to make images' length-width ratio + constant. + + Args: + input (Variable): The input tensor of image resize layer, + This is a 4-D tensor of the shape + (num_batches, channels, in_h, in_w). + out_short_len(int): The length of output images' short edge. + + Returns: + out (Variable): The output is a 4-D tensor of the shape + (num_batches, channls, out_h, out_w). + """ + in_shape = input.shape + if len(in_shape) != 4: + raise ValueError( + "The rank of input must be 4 (num_batches, channels, in_h, in_w).") + hw = in_shape[2:4] + short_idx = hw.index(min(hw)) + long_idx = 1 - short_idx + out_shape = list(hw) + out_shape[short_idx] = out_short_len + out_shape[long_idx] = int( + float(out_shape[long_idx]) * (float(out_short_len) / float(hw[ + short_idx])) + 0.5) + return image_resize(input=input, out_shape=out_shape, resample=resample) + + +def gather(input, index): + """ + Output is obtained by gathering entries of the outer-most dimension + of X indexed by `index` and concatenate them together. + + .. math:: + + Out = X[Index] + + + .. code-block:: text + + + Given: + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + Then: + + Out = [[3, 4], + [5, 6]] + + Args: + input (Variable): The source input with rank>=1. + index (Variable): The index input with rank=1. + + Returns: + output (Variable): The output is a tensor with the same rank as input. + + Examples: + .. code-block:: python + + output = fluid.layers.gather(x, index) + """ + helper = LayerHelper('gather', **locals()) + dtype = helper.input_dtype() + out = helper.create_tmp_variable(dtype) + helper.append_op( + type="gather", + inputs={"X": input, + "Index": index}, + outputs={"Out": out}) + return out + + def random_crop(input, shape, seed=1): helper = LayerHelper("random_crop", **locals()) dtype = helper.input_dtype() diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index a9fe25744cc0b385479c9366af1b731ec221dd5a..69cfde852dd087bb9192da1f7582f925582dbce4 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -71,6 +71,8 @@ __all__ = [ 'cumsum', 'scatter', 'sum', + 'polygon_box_transform', + 'shape', ] + __activations__ for _OP in set(__all__): diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py index 5accaacd5361165d30b92c71ae4fd62e23e44e07..8d48e9abef0fb9861284c6302b30efb0e3994989 100644 --- a/python/paddle/fluid/recordio_writer.py +++ b/python/paddle/fluid/recordio_writer.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
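A minimal usage sketch of the `image_resize` family and the `gather` layer introduced in `nn.py` above, assuming a 4-D image input built with `fluid.layers.data` (layer names follow this diff; shapes and the `index` variable are illustrative):

```python
import paddle.fluid as fluid

# A 4-D image batch (num_batches, channels, in_h, in_w); the batch dim is implicit.
x = fluid.layers.data(name='x', shape=[3, 9, 6], dtype='float32')

out1 = fluid.layers.image_resize(x, out_shape=[12, 12])      # resample defaults to 'BILINEAR'
out2 = fluid.layers.resize_bilinear(x, scale=3)               # alias of image_resize
out3 = fluid.layers.image_resize_short(x, out_short_len=12)   # keeps the length-width ratio

# gather: Out = X[Index]; index is assumed to be a rank-1 int32 Variable of row ids.
index = fluid.layers.data(name='index', shape=[1], dtype='int32')
rows = fluid.layers.gather(x, index)
```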
+import os import core import contextlib - -__all__ = ['convert_reader_to_recordio_file'] +__all__ = [ + 'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files' +] @contextlib.contextmanager @@ -46,3 +48,36 @@ def convert_reader_to_recordio_file( writer.complete_append_tensor() counter += 1 return counter + + +def convert_reader_to_recordio_files( + filename, + batch_per_file, + reader_creator, + feeder, + compressor=core.RecordIOWriter.Compressor.Snappy, + max_num_records=1000, + feed_order=None): + if feed_order is None: + feed_order = feeder.feed_names + f_name, f_ext = os.path.splitext(filename) + assert (f_ext == ".recordio") + + lines = [] + f_idx = 0 + counter = 0 + for idx, batch in enumerate(reader_creator()): + lines.append(batch) + if idx >= batch_per_file and idx % batch_per_file == 0: + filename = "%s-%05d%s" % (f_name, f_idx, f_ext) + with create_recordio_writer(filename, compressor, + max_num_records) as writer: + for l in lines: + res = feeder.feed(l) + for each in feed_order: + writer.append_tensor(res[each]) + writer.complete_append_tensor() + counter += 1 + lines = [] + f_idx += 1 + return counter diff --git a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py index de3906fc6a005181b0ab04a846eb2e7ce14004c2..b3117cf2e5e0513089e5e1146d49702fcc8b7ba6 100644 --- a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py @@ -48,13 +48,15 @@ def linear(): return avg_loss +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_func=train_program, - place=place, - optimizer=fluid.optimizer.SGD(learning_rate=0.001)) + train_func=train_program, place=place, optimizer_func=optimizer_func) def event_handler(event): if isinstance(event, fluid.EndStepEvent): diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py index 63dc1b6ce30974ede22a3f7772b76bf207bbae39..2df3da9cca7042222317de626460909f018cb107 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py @@ -85,6 +85,10 @@ def train_network(): return [avg_cost, accuracy] +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): BATCH_SIZE = 128 EPOCH_NUM = 1 @@ -111,9 +115,7 @@ def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_func=train_program, - optimizer=fluid.optimizer.Adam(learning_rate=0.001), - place=place) + train_func=train_program, optimizer_func=optimizer_func, place=place) trainer.train( reader=train_reader, diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py index 0bf8f265a1c1b11364ecfa11061af183ce20d51e..224cca417e717bbcc54b58be6ac0219be207dea3 100644 --- 
a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py @@ -64,6 +64,10 @@ def train_network(): return [avg_cost, accuracy] +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): BATCH_SIZE = 128 train_reader = paddle.batch( @@ -88,9 +92,7 @@ def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_func=train_program, - place=place, - optimizer=fluid.optimizer.Adam(learning_rate=0.001)) + train_func=train_program, place=place, optimizer_func=optimizer_func) trainer.train( reader=train_reader, diff --git a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py index 8cce398ff33695dc15ae6fb01a887194596af001..0ccb3a39e02ea0c24bdfe01c5eba73b92da88a04 100755 --- a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py +++ b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py @@ -141,12 +141,16 @@ def train_program(): return [avg_cost] +def optimize_func(): + return fluid.optimizer.SGD(learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, decay_steps=100000, decay_rate=0.5, staircase=True)) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.SGD(learning_rate=0.01) trainer = fluid.Trainer( - train_func=train_program, place=place, optimizer=optimizer) + train_func=train_program, place=place, optimizer_func=optimize_func) feed_order = [ 'word_data', 'ctx_n2_data', 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', @@ -245,7 +249,7 @@ def infer(use_cuda, inference_program, params_dirname): }, return_numpy=False) - print("infer results: ", np.array(results[0])) + print("infer results: ", np.array(results[0]).shape) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py index d4b723d3e6b619709ab3dc76a32ae87f1cdec274..c4b37df3a09f93fe965ae28ce783f06f5018020d 100644 --- a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py @@ -158,6 +158,13 @@ def train_program(is_sparse): return avg_cost +def optimizer_func(): + return fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.1)) + + def train(use_cuda, is_sparse, is_local=True): EPOCH_NUM = 1 @@ -182,11 +189,8 @@ def train(use_cuda, is_sparse, is_local=True): trainer = fluid.Trainer( train_func=partial(train_program, is_sparse), - optimizer=fluid.optimizer.Adagrad( - learning_rate=1e-4, - regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.1)), - place=place) + place=place, + optimizer_func=optimizer_func) trainer.train( reader=train_reader, diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py 
b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py index 03439cbd37671b4727879bf3d0793f016f55247a..9a09db25dc0e2c71772aa06e6d0cf993321612e4 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py @@ -57,14 +57,17 @@ def train_program(): return [avg_cost, acc] +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adam(learning_rate=0.001) trainer = fluid.Trainer( train_func=train_program, place=place, - optimizer=optimizer, + optimizer_func=optimizer_func, parallel=True) def event_handler(event): diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py index 89bbd21bea7d64a8dd6fc32829b6addb680da62e..b2b544e791d7ea35ff7d2c9a2dce7ce7f5680f38 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py @@ -44,12 +44,15 @@ def train_program(): return [avg_cost, acc] +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adam(learning_rate=0.001) trainer = fluid.Trainer( - train_func=train_program, place=place, optimizer=optimizer) + train_func=train_program, place=place, optimizer_func=optimizer_func) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): diff --git a/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py index dfc7325acf23176c05fe42761b9997b98d23372a..090c11ce1e79201f0d65d3540527791ab2191d4a 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py +++ b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py @@ -155,12 +155,15 @@ def train_program(): return [avg_cost, scale_infer] +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.2) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.SGD(learning_rate=0.2) trainer = fluid.Trainer( - train_func=train_program, place=place, optimizer=optimizer) + train_func=train_program, place=place, optimizer_func=optimizer_func) feed_order = [ 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py index 11e9fd1bec1450f6753dbe38c7014090d6e136b6..9b61f7a00ce5e2a08c2105fb7f50e6868ef25df3 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py @@ -64,15 +64,18 @@ def train_program(word_dict): return 
[avg_cost, accuracy] +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adagrad(learning_rate=0.002) word_dict = paddle.dataset.imdb.word_dict() trainer = fluid.Trainer( train_func=partial(train_program, word_dict), place=place, - optimizer=optimizer) + optimizer_func=optimizer_func) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py index 90757d54f99715163518ce5a094e6ba3a67efed3..aa7c567b4d66ba07c26d54436fb305011cfeccf2 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py @@ -79,15 +79,18 @@ def train_program(word_dict): return [avg_cost, accuracy] +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adagrad(learning_rate=0.002) word_dict = paddle.dataset.imdb.word_dict() trainer = fluid.Trainer( train_func=partial(train_program, word_dict), place=place, - optimizer=optimizer) + optimizer_func=optimizer_func) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py index 52b7d4a83779d01936afb3d9d1e4864b05d55b5a..113dda88ca974c9e6241f127091bd96fb2af4a70 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py @@ -71,15 +71,18 @@ def train_program(word_dict): return [avg_cost, accuracy] +def optimizer_func(): + return fluid.optimizer.Adagrad(learning_rate=0.002) + + def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adagrad(learning_rate=0.002) word_dict = paddle.dataset.imdb.word_dict() trainer = fluid.Trainer( train_func=partial(train_program, word_dict), place=place, - optimizer=optimizer) + optimizer_func=optimizer_func) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): diff --git a/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py index eeb8e67087334ea96aab9cdb6272e34e2eb99939..ba44f72d9b03c3a44560a8a30cba2253256314ef 100644 --- a/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py @@ -80,6 +80,10 @@ def train_program(is_sparse): return avg_cost +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.001) + + def train(use_cuda, train_program, params_dirname): train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) @@ -104,9 +108,7 @@ 
def train(use_cuda, train_program, params_dirname): sys.exit("got NaN loss, training failed.") trainer = fluid.Trainer( - train_func=train_program, - optimizer=fluid.optimizer.SGD(learning_rate=0.001), - place=place) + train_func=train_program, optimizer_func=optimizer_func, place=place) trainer.train( reader=train_reader, diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index a1ca6d981fafb401985d03e9f2d63d1cb41b21b5..fa696acdfa9058af14f0bd34ce1a2980db5aeafc 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -80,21 +80,6 @@ def encoder_decoder(): return rnn() -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = core.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - def main(): rnn_out = encoder_decoder() label = layers.data( @@ -122,18 +107,21 @@ def main(): exe.run(framework.default_startup_program()) + feed_order = [ + 'src_word_id', 'target_language_word', 'target_language_next_word' + ] + + feed_list = [ + fluid.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 for pass_id in xrange(10): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) outs = exe.run(fluid.default_main_program(), - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + diff --git a/python/paddle/fluid/tests/test_concurrency.py b/python/paddle/fluid/tests/no_test_concurrency.py similarity index 100% rename from python/paddle/fluid/tests/test_concurrency.py rename to python/paddle/fluid/tests/no_test_concurrency.py diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index fead95ffdab25c7ea96b7ef223efc0abf7eea3e3..673bd728718ca233b426fe2aaae307413d875174 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -43,10 +43,10 @@ list(REMOVE_ITEM TEST_OPS test_warpctc_op) list(REMOVE_ITEM TEST_OPS test_dist_train) list(REMOVE_ITEM TEST_OPS test_parallel_executor_crf) list(REMOVE_ITEM TEST_OPS test_parallel_executor_fetch_feed) +# TODO(wuyi): this test hungs on CI, will add it back later +list(REMOVE_ITEM TEST_OPS test_listen_and_serv_op) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL) py_test_modules(test_dist_train MODULES test_dist_train SERIAL) -# tests that need to be done in fixed timeout -set_tests_properties(test_listen_and_serv_op PROPERTIES TIMEOUT 20) diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py 
b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 56f5af91d8e58086c12fde6948229675569aa271..b4c48d85f2c564d877c0a29e64dd2944d2b26ea3 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -19,7 +19,8 @@ import math from op_test import OpTest -def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): +def box_coder(target_box, prior_box, prior_box_var, output_box, code_type, + box_normalized): prior_box_x = ( (prior_box[:, 2] + prior_box[:, 0]) / 2).reshape(1, prior_box.shape[0]) prior_box_y = ( @@ -30,6 +31,9 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): (prior_box[:, 3] - prior_box[:, 1])).reshape(1, prior_box.shape[0]) prior_box_var = prior_box_var.reshape(1, prior_box_var.shape[0], prior_box_var.shape[1]) + if not box_normalized: + prior_box_height = prior_box_height + 1 + prior_box_width = prior_box_width + 1 if (code_type == "EncodeCenterSize"): target_box_x = ((target_box[:, 2] + target_box[:, 0]) / 2).reshape( @@ -40,6 +44,9 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): target_box.shape[0], 1) target_box_height = ((target_box[:, 3] - target_box[:, 1])).reshape( target_box.shape[0], 1) + if not box_normalized: + target_box_height = target_box_height + 1 + target_box_width = target_box_width + 1 output_box[:,:,0] = (target_box_x - prior_box_x) / prior_box_width / \ prior_box_var[:,:,0] @@ -64,9 +71,13 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): output_box[:, :, 1] = target_box_y - target_box_height / 2 output_box[:, :, 2] = target_box_x + target_box_width / 2 output_box[:, :, 3] = target_box_y + target_box_height / 2 + if not box_normalized: + output_box[:, :, 2] = output_box[:, :, 2] - 1 + output_box[:, :, 3] = output_box[:, :, 3] - 1 -def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type): +def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type, + box_normalized): n = target_box.shape[0] m = prior_box.shape[0] output_box = np.zeros((n, m, 4), dtype=np.float32) @@ -74,11 +85,11 @@ def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type): if (code_type == "EncodeCenterSize"): box_coder(target_box[lod[i]:lod[i + 1], :], prior_box, prior_box_var, output_box[lod[i]:lod[i + 1], :, :], - code_type) + code_type, box_normalized) elif (code_type == "DecodeCenterSize"): box_coder(target_box[lod[i]:lod[i + 1], :, :], prior_box, prior_box_var, output_box[lod[i]:lod[i + 1], :, :], - code_type) + code_type, box_normalized) return output_box @@ -93,15 +104,45 @@ class TestBoxCoderOp(OpTest): prior_box_var = np.random.random((10, 4)).astype('float32') target_box = np.random.random((5, 10, 4)).astype('float32') code_type = "DecodeCenterSize" + box_normalized = False output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type) + lod[0], code_type, box_normalized) self.inputs = { 'PriorBox': prior_box, 'PriorBoxVar': prior_box_var, 'TargetBox': target_box, } - self.attrs = {'code_type': 'decode_center_size'} + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False + } + self.outputs = {'OutputBox': output_box} + + +class TestBoxCoderOpWithoutBoxVar(OpTest): + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "box_coder" + lod = [[0, 1, 2, 3, 4, 5]] + prior_box = np.random.random((10, 4)).astype('float32') + prior_box_var = np.ones((10, 
4)).astype('float32') + target_box = np.random.random((5, 10, 4)).astype('float32') + code_type = "DecodeCenterSize" + box_normalized = False + output_box = batch_box_coder(prior_box, prior_box_var, target_box, + lod[0], code_type, box_normalized) + + self.inputs = { + 'PriorBox': prior_box, + 'TargetBox': target_box, + } + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False + } self.outputs = {'OutputBox': output_box} @@ -116,15 +157,16 @@ class TestBoxCoderOpWithLoD(OpTest): prior_box_var = np.random.random((10, 4)).astype('float32') target_box = np.random.random((20, 4)).astype('float32') code_type = "EncodeCenterSize" + box_normalized = True output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type) + lod[0], code_type, box_normalized) self.inputs = { 'PriorBox': prior_box, 'PriorBoxVar': prior_box_var, 'TargetBox': (target_box, lod), } - self.attrs = {'code_type': 'encode_center_size'} + self.attrs = {'code_type': 'encode_center_size', 'box_normalized': True} self.outputs = {'OutputBox': output_box} diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index fa49bd41a5876847d046682dce5c3d3868a18500..32647f9aa81431a3ecc798df6f1360a14fd978af 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -12,40 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import unittest - import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid.transpiler.distribute_transpiler import delete_ops -import numpy + +from transpiler_test import TranspilerTest -class TestDistTranspiler(unittest.TestCase): +class TestDistTranspiler(TranspilerTest): def setUp(self): - self.trainer_id = 0 - self.trainers = 2 - self.pservers = 2 - self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175" self.current_pserver_ep = "127.0.0.1:6174" - def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w')) - - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) - - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) - return optimize_ops, params_grads - def test_transpiler(self): trainer = self.get_trainer() pserver, startup = self.get_pserver(self.current_pserver_ep) @@ -70,14 +46,6 @@ class TestDistTranspiler(unittest.TestCase): fc_w_var = startup.global_block().var("fc_w.block1") self.assertEqual(fc_w_var.shape, (500, 1000)) - def get_main_program(self): - main = fluid.Program() - - with fluid.program_guard(main): - self.net_conf() - - return main - def get_expect_trainer_ops(self): trainer = fluid.Program() @@ -92,25 +60,6 @@ class TestDistTranspiler(unittest.TestCase): ops.insert(ops.index("elementwise_add_grad") + 1, "send_vars") return ops - def get_trainer(self): - return self._transpiler_instance().get_trainer_program() - - def get_pserver(self, ep): - t = self._transpiler_instance() - pserver = t.get_pserver_program(ep) - startup = t.get_startup_program(ep, pserver) - return pserver, startup - - def _transpiler_instance(self): - main = self.get_main_program() - t = 
fluid.DistributeTranspiler() - t.transpile( - self.trainer_id, - program=main, - pservers=self.pserver_eps, - trainers=self.trainers) - return t - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 6fd043c27e27db53c95be3630b6c08216e8e35f4..4ae90864806204197c52bbbdc5516f141afd4613 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -20,8 +20,9 @@ from op_test import OpTest class TestGatherOp(OpTest): def setUp(self): self.op_type = "gather" - xnp = np.random.random((10, 20)).astype("float32") - self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")} + self.config() + xnp = np.random.random(self.x_shape).astype("float32") + self.inputs = {'X': xnp, 'Index': np.array(self.index).astype("int32")} self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} def test_check_output(self): @@ -30,6 +31,16 @@ class TestGatherOp(OpTest): def test_check_grad(self): self.check_grad(['X'], 'Out') + def config(self): + self.x_shape = (10, 20) + self.index = [1, 3, 5] + + +class TestCase1(TestGatherOp): + def config(self): + self.x_shape = (10) + self.index = [1, 3, 5] + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 60dc1f83fc32e2551eb2a04ef35f1c8a0ffec769..621a450fa6a6a8f47e3f1c1de609614b2359c33b 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -369,13 +369,21 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(output) print(str(program)) - def test_upsampling_bilinear2d(self): + def test_resize_bilinear(self): program = Program() with program_guard(program): x = layers.data(name='x', shape=[3, 9, 6], dtype="float32") - output = layers.upsampling_bilinear2d(x, out_shape=[12, 12]) + output = layers.resize_bilinear(x, out_shape=[12, 12]) self.assertIsNotNone(output) - output = layers.upsampling_bilinear2d(x, scale=3) + output = layers.resize_bilinear(x, scale=3) + self.assertIsNotNone(output) + print(str(program)) + + def test_polygon_box_transform(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[8, 4, 4], dtype="float32") + output = layers.polygon_box_transform(input=x) self.assertIsNotNone(output) print(str(program)) diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py index cf89f9d0ebf6200933e539ef7fa8cbdc8f6db058..1226027ddc9c0b9dce9cedc5d1d20c0708647b6f 100644 --- a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py +++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py @@ -23,7 +23,7 @@ from multiprocessing import Process from op_test import OpTest -def run_pserver(use_cuda, sync_mode, ip, port, trainer_count, trainer_id): +def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id): x = fluid.layers.data(name='x', shape=[1], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') @@ -39,15 +39,8 @@ def run_pserver(use_cuda, sync_mode, ip, port, trainer_count, trainer_id): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - port = os.getenv("PADDLE_INIT_PORT", port) - pserver_ips = 
os.getenv("PADDLE_INIT_PSERVERS", ip) # ip,ip... - eplist = [] - for ip in pserver_ips.split(","): - eplist.append(':'.join([ip, port])) - pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS", trainer_count)) - current_endpoint = os.getenv("POD_IP", ip) + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID", trainer_id)) + pserver_endpoints = ip + ":" + port + current_endpoint = ip + ":" + port t = fluid.DistributeTranspiler() t.transpile( trainer_id, @@ -62,47 +55,51 @@ def run_pserver(use_cuda, sync_mode, ip, port, trainer_count, trainer_id): class TestListenAndServOp(OpTest): def setUp(self): - self.sleep_time = 5 + self.ps_timeout = 5 self.ip = "127.0.0.1" self.port = "6173" - self.trainer_count = 1 + self.trainers = 1 self.trainer_id = 1 - def _raise_signal(self, parent_pid, raised_signal): - time.sleep(self.sleep_time) - ps_command = subprocess.Popen( - "ps -o pid --ppid %d --noheaders" % parent_pid, - shell=True, - stdout=subprocess.PIPE) - ps_output = ps_command.stdout.read() - retcode = ps_command.wait() - assert retcode == 0, "ps command returned %d" % retcode - - for pid_str in ps_output.split("\n")[:-1]: - try: - os.kill(int(pid_str), raised_signal) - except Exception: - continue - def _start_pserver(self, use_cuda, sync_mode): p = Process( target=run_pserver, - args=(use_cuda, sync_mode, self.ip, self.port, self.trainer_count, + args=(use_cuda, sync_mode, self.ip, self.port, self.trainers, self.trainer_id)) p.start() + return p.pid + + def _wait_ps_ready(self, pid): + retry_times = self.ps_timeout + while True: + assert retry_times >= 0, "wait ps ready failed" + time.sleep(0.5) + try: + # the listen_and_serv_op would touch a file which contains the listen port + # on the /tmp directory until it was ready to process all the RPC call. + os.stat("/tmp/paddle.%d.port" % pid) + return + except os.error: + retry_times -= 1 + + def test_rpc_interfaces(self): + # TODO(Yancey1989): need to make sure the rpc interface correctly. + pass def test_handle_signal_in_serv_op(self): # run pserver on CPU in sync mode - self._start_pserver(False, True) + pid = self._start_pserver(False, True) + self._wait_ps_ready(pid) - # raise SIGINT to pserver - self._raise_signal(os.getpid(), signal.SIGINT) + # raise SIGTERM to pserver + os.kill(pid, signal.SIGTERM) # run pserver on CPU in async mode - self._start_pserver(False, False) + pid = self._start_pserver(False, False) + self._wait_ps_ready(pid) # raise SIGTERM to pserver - self._raise_signal(os.getpid(), signal.SIGTERM) + os.kill(pid, signal.SIGTERM) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py new file mode 100644 index 0000000000000000000000000000000000000000..a62ee050075cb8c9f8817c142825a89c24bdfedf --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -0,0 +1,47 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestShapeOp(OpTest): + def setUp(self): + self.op_type = "shape" + self.config() + self.shape = [2, 3] + input = np.zeros(self.shape) + self.inputs = {'Input': input} + self.outputs = {'Out': np.array(self.shape)} + + def config(self): + self.shape = [2, 3] + + def test_check_output(self): + self.check_output() + + +class case1(TestShapeOp): + def config(self): + self.shape = [2] + + +class case2(TestShapeOp): + def config(self): + self.shape = [1, 2, 3] + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py new file mode 100644 index 0000000000000000000000000000000000000000..5ae2844e295194f95701e1cdccd43bf919bf964f --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py @@ -0,0 +1,80 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np + +import paddle.fluid as fluid +from paddle.fluid.transpiler.distribute_transpiler import delete_ops + +from transpiler_test import TranspilerTest + + +class TestSimpleDistTranspiler(TranspilerTest): + def setUp(self): + self.current_pserver_ep = "127.0.0.1:6175" + + def test_simple_transpiler(self): + np.random.seed(1) + + trainer = self.get_trainer() + pserver, startup = self.get_pserver(self.current_pserver_ep) + self.assertEqual([op.type for op in trainer.global_block().ops], + self.get_expect_trainer_ops()) + + self.assertEqual(len(pserver.blocks), 2) + # block0: listen_and_serv + self.assertEqual([op.type for op in pserver.blocks[0].ops], + ["listen_and_serv"]) + # block1: optimize pass + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "sgd"]) + + # confirm startup program + self.assertEqual([op.type for op in startup.global_block().ops], + ["fill_constant", "uniform_random", "uniform_random"]) + + # the variable #fc_w will NOT be splited + fc_w_var = startup.global_block().var("fc_w@GRAD") + self.assertEqual(fc_w_var.shape, (1000, 1000)) + + fc_w_var = startup.global_block().var("fc_w@GRAD.trainer_0") + self.assertEqual(fc_w_var.shape, (1000, 1000)) + + def get_expect_trainer_ops(self): + trainer = fluid.Program() + + with fluid.program_guard(trainer): + optimize_ops, params_grads = self.net_conf() + + delete_ops(trainer.global_block(), optimize_ops) + ops = [op.type for op in trainer.global_block().ops] + [ + "send_vars", "send_barrier", "recv", "recv", "fetch_barrier" + ] + ops.insert(ops.index("elementwise_add_grad") + 1, "send_vars") + return ops + + def _transpiler_instance(self): + main = self.get_main_program() + t = fluid.DistributeTranspiler() + t.transpile( + self.trainer_id, + program=main, + pservers=self.pserver_eps, + trainers=self.trainers, + slice_var_up=False) + return t 
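A minimal sketch of the `slice_var_up` switch exercised by `_transpiler_instance` above, assuming two pservers (endpoints and counts are illustrative):

```python
import paddle.fluid as fluid

# `program` is assumed to already hold a network plus an optimizer.minimize
# call, as built by net_conf() in transpiler_test.py.
t = fluid.DistributeTranspiler()
# With slice_var_up=False every parameter/gradient stays in one block, so whole
# variables are shuffled across pservers instead of being cut into ~8K-element
# shards as in the default slice_var_up=True mode.
t.transpile(
    0,  # trainer_id
    program=fluid.default_main_program(),
    pservers="127.0.0.1:6174,127.0.0.1:6175",
    trainers=2,
    slice_var_up=False)
trainer_prog = t.get_trainer_program()
pserver_prog = t.get_pserver_program("127.0.0.1:6175")
```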
+ + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_split_var.py b/python/paddle/fluid/tests/unittests/test_slice_var.py similarity index 85% rename from python/paddle/fluid/tests/unittests/test_split_var.py rename to python/paddle/fluid/tests/unittests/test_slice_var.py index 157def9b56e44092a86023035d1ab444c938aa07..82305b23a1a1e2cee8cef6b291d848581fe5b509 100644 --- a/python/paddle/fluid/tests/unittests/test_split_var.py +++ b/python/paddle/fluid/tests/unittests/test_slice_var.py @@ -14,14 +14,14 @@ import math import unittest -from paddle.fluid.transpiler.distribute_transpiler import split_variable +from paddle.fluid.transpiler.distribute_transpiler import slice_variable import paddle.fluid as fluid import paddle.fluid.core as core import random -class TestSplitVar(unittest.TestCase): - def check_split_output(self, shapes, expected_sizes, min_size): +class TestSliceVar(unittest.TestCase): + def check_slice_output(self, shapes, expected_sizes, min_size): var_list = [] program = fluid.Program() for shape in shapes: @@ -31,7 +31,7 @@ class TestSplitVar(unittest.TestCase): # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) - blocks = split_variable(var_list, 10, min_size) + blocks = slice_variable(var_list, 10, min_size) all_sizes = [] for s in expected_sizes: for s2 in s: @@ -49,7 +49,7 @@ class TestSplitVar(unittest.TestCase): [1150, 1150, 1150, 1150, 1150, 1150, 1100] ] - self.check_split_output(shapes, expected_sizes, 1024) + self.check_slice_output(shapes, expected_sizes, 1024) def test_check_output_8k(self): shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10], @@ -57,7 +57,7 @@ class TestSplitVar(unittest.TestCase): expected_sizes = [[15], [1024], [10976, 10976], [8160], [8000], [35937, 35937, 35937, 35937, 35937, 35937]] - self.check_split_output(shapes, expected_sizes, 8192) + self.check_slice_output(shapes, expected_sizes, 8192) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/transpiler_test.py b/python/paddle/fluid/tests/unittests/transpiler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d84c5d9c41c705cf6d14cc0b5a8c692b0d646337 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/transpiler_test.py @@ -0,0 +1,73 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
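The `slice_variable` helper exercised by the renamed `test_slice_var.py` above can be sketched as follows, assuming a few freshly created variables (names and shapes are illustrative):

```python
import paddle.fluid as fluid
from paddle.fluid.transpiler.distribute_transpiler import slice_variable

prog = fluid.Program()
var_list = [
    prog.global_block().create_var(name=name, persistable=True, shape=shape)
    for name, shape in [('fc_w', [28, 784]), ('fc_b', [784]), ('tiny', [3, 5])]
]
# Cut each variable into at most 10 blocks while keeping blocks near or above
# the 1024-element minimum, so 'fc_w' (21952 elements) gets sliced while
# 'fc_b' and 'tiny' stay in a single block each.
blocks = slice_variable(var_list, 10, 1024)
```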
+ +import unittest +import numpy as np + +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.layers as layers + + +class TranspilerTest(unittest.TestCase): + @classmethod + def setUpClass(self): + self.trainer_id = 0 + self.trainers = 2 + self.pservers = 2 + self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175" + + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w')) + + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) + + optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + return optimize_ops, params_grads + + def get_main_program(self): + main = fluid.Program() + + with fluid.program_guard(main): + self.net_conf() + + return main + + def get_trainer(self): + return self._transpiler_instance().get_trainer_program() + + def get_pserver(self, ep): + t = self._transpiler_instance() + pserver = t.get_pserver_program(ep) + startup = t.get_startup_program(ep, pserver) + return pserver, startup + + def _transpiler_instance(self): + main = self.get_main_program() + t = fluid.DistributeTranspiler() + t.transpile( + self.trainer_id, + program=main, + pservers=self.pserver_eps, + trainers=self.trainers) + return t diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index e5cec4c76af2f1a21c5b032b5c371d8c66edc90d..444162664daeb3f86cc1a04258e8e6932abb5d1f 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -116,13 +116,13 @@ class Trainer(object): Args: train_func(callable): A function which will return loss. The loss must be a scalar. - optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer + optimizer_func(callable): A function that returns an Optimizer object. place: The device place of this trainer. """ def __init__(self, train_func, - optimizer, + optimizer_func, param_path=None, place=None, parallel=False, @@ -132,7 +132,6 @@ class Trainer(object): # 1. we need to generate a framework.Program by calling # program_func. Reference: fluid.program_guard in # test_word2vec.py - assert isinstance(optimizer, opt_module.Optimizer) # config for checkpoint # only chief worker will save variables @@ -155,11 +154,14 @@ class Trainer(object): self.train_func_outputs = program_func_outs if isinstance( program_func_outs, list) else [program_func_outs] self.test_program = self.train_program.clone() + + # The fisrt element of program_func_outs is loss. + loss = self.train_func_outputs[0] + + optimizer = optimizer_func() if not isinstance(optimizer, opt_module.Optimizer): raise TypeError( "The optimizer should be an instance of Optimizer") - # The fisrt element of program_func_outs is loss. 
- loss = self.train_func_outputs[0] optimize_ops, params_grads = optimizer.minimize(loss) self.place = check_and_get_place(place) diff --git a/python/paddle/fluid/transpiler/__init__.py b/python/paddle/fluid/transpiler/__init__.py index 045ca537b2e84c02298d6375a7ef5bdbb5517380..cf18090f71f34be5105498f5846dbcdf15ab2e3f 100644 --- a/python/paddle/fluid/transpiler/__init__.py +++ b/python/paddle/fluid/transpiler/__init__.py @@ -15,10 +15,9 @@ from distribute_transpiler import DistributeTranspiler from inference_transpiler import InferenceTranspiler from memory_optimization_transpiler import memory_optimize, release_memory -from distribute_transpiler_simple import SimpleDistributeTranspiler from ps_dispatcher import HashName, RoundRobin __all__ = [ - "DistributeTranspiler", "InferenceTranspiler", "SimpleDistributeTranspiler", - "memory_optimize", "release_memory", "HashName", "RoundRobin" + "DistributeTranspiler", "InferenceTranspiler", "memory_optimize", + "release_memory", "HashName", "RoundRobin" ] diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 06b0a1375ce6568cca864cd8a2dd69ee46b223a7..27992df462ffd00ddf445538cc508b4232712481 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -39,6 +39,7 @@ Steps to transpile pserver: from __future__ import print_function import math +import numpy as np from ps_dispatcher import RoundRobin, HashName, PSDispatcher from .. import core, framework @@ -70,7 +71,7 @@ def same_or_split_var(p_name, var_name): return p_name == var_name or p_name.startswith(var_name + ".block") -def split_variable(var_list, service_count, min_block_size=8192): +def slice_variable(var_list, slice_count, min_block_size=8192): """ We may need to split dense tensor to one or more blocks and put them equally onto parameter server. One block is a sub-tensor @@ -78,25 +79,25 @@ def split_variable(var_list, service_count, min_block_size=8192): We need to have a minimal block size so that the calculations in the parameter server side can gain better performance. By default - minimum block size 8K elements (maybe 16bit or 32bit or 64bit). + minimum block size 8K elements (maybe 16bit or 32bit or 64bit). Args: var_list (list): List of variables. - service_count (int): Numel of pserver services. A pserver may have two - or more listening ports. + slice_count (int): Numel of count that variables will be sliced, which + could be the pserver services' count. min_block_size (int): Minimum splitted block size. Returns: - blocks (list[(varname, block_id, current_block_size)]): A list + blocks (list[(varname, block_id, current_block_size)]): A list of VarBlocks. Each VarBlock specifies a shard of the var. """ blocks = [] for var in var_list: - split_count = service_count + split_count = slice_count var_numel = reduce(lambda x, y: x * y, var.shape) max_pserver_count = int(math.floor(var_numel / float(min_block_size))) if max_pserver_count == 0: max_pserver_count = 1 - if max_pserver_count < service_count: + if max_pserver_count < slice_count: split_count = max_pserver_count block_size = int(math.ceil(var_numel / float(split_count))) @@ -177,7 +178,7 @@ class DistributeTranspiler: for index in range(len(self.pserver_endpoints)) ] - def _init_splited_vars(self, split_method): + def _init_splited_vars(self, slice_var_up): # update these mappings for further transpile: # 1. param_var_mapping: param var name -> [splited params vars] # 2. 
grad_var_mapping: grad var name -> [splited grads vars] @@ -186,19 +187,34 @@ class DistributeTranspiler: param_list = [] grad_list = [] + param_grad_set = set() for p, g in self.params_grads: # skip parameter marked not trainable if type(p) == Parameter and p.trainable == False: continue - param_list.append(p) - grad_list.append(g) + if p.name not in param_grad_set: + param_list.append(p) + param_grad_set.add(p.name) + if g.name not in param_grad_set: + grad_list.append(g) + param_grad_set.add(g.name) self._update_dist_lookup_table_vars(param_list, grad_list, self.params_grads) - grad_blocks = split_variable(grad_list, len(self.pserver_endpoints)) - param_blocks = split_variable(param_list, len(self.pserver_endpoints)) + if slice_var_up: + # when we slice var up into blocks, we will slice the var according to + # pserver services' count. A pserver may have two or more listening ports. + grad_blocks = slice_variable(grad_list, len(self.pserver_endpoints)) + param_blocks = slice_variable(param_list, + len(self.pserver_endpoints)) + else: + # when we do NOT slice var up into blocks, we will always slice params + # grads into one block. + grad_blocks = slice_variable(grad_list, 1) + param_blocks = slice_variable(param_list, 1) assert (len(grad_blocks) == len(param_blocks)) + # origin_varname -> [splited_var] self.param_var_mapping = self._create_vars_from_blocklist( self.origin_program, param_blocks) @@ -229,6 +245,7 @@ class DistributeTranspiler: program=None, pservers="127.0.0.1:6174", trainers=1, + slice_var_up=True, split_method=RoundRobin, sync_mode=True): """ @@ -262,13 +279,27 @@ class DistributeTranspiler: self.has_distributed_lookup_table = self._has_distributed_lookup_table() # split and create vars, then put splited vars in dicts for later use. - self._init_splited_vars(split_method) + self._init_splited_vars(slice_var_up) # step 3.1: insert send op to send gradient vars to parameter servers ps_dispatcher.reset() send_vars = [] - for orig_varname, splited_vars in self.grad_var_mapping.items(): + + # in general cases, the number of pservers is times of 2, and this + # will lead to uneven distribution among weights and bias: + # fc_w@GRAD_trainer_0, fc_w@GRAD_trainer_1 --> pserver1 + # fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2 + # shuffle the map will avoid the uneven distribution above + grad_var_mapping_items = self.grad_var_mapping.items() + if not slice_var_up: + np.random.shuffle(grad_var_mapping_items) + + for orig_varname, splited_vars in grad_var_mapping_items: eplist = ps_dispatcher.dispatch(splited_vars) + + if not slice_var_up: + assert (len(splited_vars) == 1) + if len(splited_vars) == 1: orig_varname = splited_vars[0].name index = find_op_by_output_arg(program.global_block(), @@ -316,6 +347,7 @@ class DistributeTranspiler: for i, ep in enumerate(eplist): self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) + # step4: Concat the parameters splits together after recv. for varname, splited_var in self.param_var_mapping.iteritems(): eps = [] @@ -788,8 +820,8 @@ class DistributeTranspiler: program (ProgramDesc): ProgramDesc which gradients blong. block_list (list[(varname, block_id, block_size)]): List of gradient blocks. add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True. 
@@ -788,8 +820,8 @@ class DistributeTranspiler:
             program (ProgramDesc): ProgramDesc which gradients blong.
             block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
             add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
-        Returns:
-            var_mapping (dict(varname->[new_varname_variable])):A dict mapping
+        Returns:
+            var_mapping (dict(varname->[new_varname_variable])):A dict mapping
                 from original var name to each var split.
         """
 
@@ -802,6 +834,9 @@ class DistributeTranspiler:
             if not block_map.has_key(varname):
                 block_map[varname] = []
             block_map[varname].append((long(offset), long(size)))
+        # Do not remove this important debug message:
+        print("block map: %s" % block_map)
+
         for varname, splited in block_map.iteritems():
             orig_var = program.global_block().var(varname)
             if len(splited) == 1:
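To make the `var_mapping` shape above concrete, here is a toy illustration of how per-variable blocks are grouped. The `"varname:block_id:block_size"` string format and the helper code are assumptions made for illustration, not the library implementation.

```python
# Grouping VarBlocks per original variable name (illustration only).
block_list = ["fc_w:0:25000", "fc_w:1:25000", "fc_b:0:1000"]

block_map = {}
for block_str in block_list:
    varname, offset, size = block_str.split(":")
    block_map.setdefault(varname, []).append((int(offset), int(size)))

print(block_map)
# {'fc_w': [(0, 25000), (1, 25000)], 'fc_b': [(0, 1000)]}
# A variable with a single block keeps its original name; one with several
# blocks ends up as vars named like fc_w.block0, fc_w.block1 on the pservers,
# matching the ".block" suffix checked by same_or_split_var above.
```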
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler_simple.py b/python/paddle/fluid/transpiler/distribute_transpiler_simple.py
deleted file mode 100644
index ea8c27cdca885dbbf90349b35df9691951264061..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/transpiler/distribute_transpiler_simple.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ..framework import Program, default_main_program, Parameter, Variable
-from ..layer_helper import LayerHelper
-
-
-def hash_name_to_server(params_grads, pserver_endpoints):
-    """
-    :param param_grads:
-    :return: a map of pserver endpoint ->
-        params -> [param list]
-        grads -> [grad list]
-    """
-
-    def _hash_param(param_name, total):
-        return hash(param_name) % total
-
-    param_grad_map = dict()
-    for param, grad in params_grads:
-        if param.trainable is True and grad is not None:
-            server_id = _hash_param(param.name, len(pserver_endpoints))
-            server_for_param = pserver_endpoints[server_id]
-            if not param_grad_map.has_key(server_for_param):
-                param_grad_map[server_for_param] = {"params": [], "grads": []}
-            param_grad_map[server_for_param]["params"].append(param)
-            param_grad_map[server_for_param]["grads"].append(grad)
-
-    return param_grad_map
-
-
-def round_robin(params_grads, pserver_endpoints):
-    assert (len(params_grads) > len(pserver_endpoints))
-
-    param_grad_map = dict()
-    pserver_idx = 0
-    for param, grad in params_grads:
-        if param.trainable is True:
-            server_for_param = pserver_endpoints[pserver_idx]
-            if not param_grad_map.has_key(server_for_param):
-                param_grad_map[server_for_param] = {"params": [], "grads": []}
-
-            param_grad_map[server_for_param]["params"].append(param)
-            param_grad_map[server_for_param]["grads"].append(grad)
-
-            pserver_idx += 1
-            if pserver_idx >= len(pserver_endpoints):
-                pserver_idx = 0
-    return param_grad_map
-
-
-class SimpleDistributeTranspiler:
-    def transpile(self,
-                  optimize_ops,
-                  params_grads,
-                  program=None,
-                  pservers="127.0.0.1:6174",
-                  trainers=1,
-                  split_method=round_robin):
-        """
-            Transpile the program to a distributed data-parallelism programs.
-
-            The main_program will be transform to use a remote parameter server
-            to do parameter optimization. And the optimization graph will be put
-            in to a parameter server program.
-
-            Use different methods to split trainable varialbles to different
-            parameter servers.
-
-            Example to run:
-
-            exe = fluid.Executor(place)
-            t = fluid.DistributeTranspiler()
-            t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1)
-
-            pserver_endpoint = os.getenv("PSERVER")
-            if pserver_endpoint:
-                pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops)
-                exe.run(fluid.default_startup_program())
-                exe.run(pserver_prog)
-            else:
-                feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
-                exe.run(fluid.default_startup_program())
-
-                for pass_id in range(PASS_NUM):
-                    ...
-
-            :param optimize_ops: op list of optimization, should be the
-                                 return value of Optimizer.minimize
-            :type optimize_ops: list
-            :param program: program to optimize, default default_main_program
-            :param pservers: parameter server endpoints like "m1:6174,m2:6174"
-            :type pservers: string
-
-            :return: return a list of programs
-        """
-        if program is None:
-            program = default_main_program()
-        self.program = program
-        self.trainers = trainers
-        self.optimize_ops = optimize_ops
-        self._optimize_distributed(
-            optimize_ops,
-            program,
-            params_grads,
-            pservers=pservers,
-            trainers=trainers,
-            split_method=split_method)
-
-    def _clone_param(self, block, v):
-        assert isinstance(v, Parameter)
-        new_p = Parameter(
-            block=block,
-            shape=v.shape,
-            dtype=v.dtype,
-            type=v.type,
-            lod_level=v.lod_level,
-            stop_gradient=v.stop_gradient,
-            trainable=v.trainable,
-            optimize_attr=v.optimize_attr,
-            regularizer=v.regularizer,
-            name=v.name)
-        block.vars[new_p.name] = new_p
-
-    def _clone_var(self, block, var):
-        assert isinstance(var, Variable)
-        return block.create_var(
-            name=var.name,
-            shape=var.shape,
-            dtype=var.dtype,
-            type=var.type,
-            lod_level=var.lod_level,
-            persistable=var.persistable)
-
-    def _optimize_distributed(self, optimize_ops, program, params_and_grads,
-                              **kwargs):
-        if kwargs.has_key("split_method"):
-            split_method = kwargs["split_method"]
-        else:
-            split_method = round_robin
-
-        assert (callable(split_method))
-        pserver_endpoints = kwargs["pservers"].split(",")
-        self.param_grad_map = split_method(params_and_grads, pserver_endpoints)
-
-        send_op_ordered_inputs = []
-        send_op_ordered_outputs = []
-        epmap = []
-        for ep, v in self.param_grad_map.iteritems():
-            send_op_ordered_inputs.extend(v["grads"])
-            send_op_ordered_outputs.extend(v["params"])
-            for i in v["grads"]:
-                epmap.append(ep)
-        send_op = program.global_block().append_op(
-            type="send",
-            inputs={"X": send_op_ordered_inputs
-                    },  # inputs is a list of tensors to be send
-            outputs={"Out": send_op_ordered_outputs},
-            attrs={"endpoints": pserver_endpoints,
-                   "epmap": epmap})
-
-    def get_trainer_program(self):
-        # remove optimize ops and add a send op to main_program
-        self.program.global_block().delete_ops(self.optimize_ops)
-        return self.program
-
-    def _create_var_for_trainers(self, block, var, trainers):
-        var_list = []
-        for i in xrange(trainers):
-            var_each = block.create_var(
-                name="%s.trainer_%d" % (var.name, i),
-                psersistable=var.persistable,
-                dtype=var.dtype,
-                shape=var.shape)
-            var_list.append(var_each)
-        return var_list
-
-    def get_pserver_program(self, endpoint, optimize_ops):
-        pserver_program = Program()
-        for v in self.param_grad_map[endpoint]["params"]:
-            self._clone_param(pserver_program.global_block(), v)
-
-        optimize_sub_program = Program()
-        grad_var_names = [
-            var.name for var in self.param_grad_map[endpoint]["grads"]
-        ]
-        for opt_op in optimize_ops:
-            for _, var in opt_op.inputs.iteritems():
-                # NOTE: append operators to merge gradients from multiple
-                # trainers. If trainers == 1, this is not needed.
-                if self.trainers > 1 and var.name in grad_var_names:
-                    vars2merge = self._create_var_for_trainers(
-                        optimize_sub_program.global_block(), var, self.trainers)
-                    merged_var = optimize_sub_program.global_block().create_var(
-                        name=var.name,
-                        persistable=var.persistable,
-                        dtype=var.dtype,
-                        shape=var.shape)
-                    optimize_sub_program.global_block().append_op(
-                        type="sum",
-                        inputs={"X": vars2merge},
-                        outputs={"Out": merged_var})
-                    optimize_sub_program.global_block().append_op(
-                        type="scale",
-                        inputs={"X": merged_var},
-                        outputs={"Out": merged_var},
-                        attrs={"scale": 1.0 / float(self.trainers)})
-                else:
-                    optimize_sub_program.global_block().create_var(
-                        name=var.name,
-                        persistable=var.persistable,
-                        dtype=var.dtype,
-                        shape=var.shape)
-
-            if opt_op.inputs.has_key("Grad"):
-                if opt_op.inputs["Grad"].name in grad_var_names:
-                    optimize_sub_program.global_block().append_op(
-                        type=opt_op.type,
-                        inputs=opt_op.inputs,
-                        outputs=opt_op.outputs,
-                        attrs=opt_op.attrs)
-            else:
-                optimize_sub_program.global_block().append_op(
-                    type=opt_op.type,
-                    inputs=opt_op.inputs,
-                    outputs=opt_op.outputs,
-                    attrs=opt_op.attrs)
-        pserver_program.global_block().append_op(
-            type="recv",
-            inputs={"RX":
-                    self.param_grad_map[endpoint]["grads"]},  # grads to recv
-            outputs={},
-            attrs={
-                "OptimizeBlock": optimize_sub_program.global_block(),
-                "endpoint": endpoint,
-                "ParamList":
-                [p.name for p in self.param_grad_map[endpoint]["params"]],
-                "GradList":
-                [p.name for p in self.param_grad_map[endpoint]["grads"]],
-                "Trainers": self.trainers
-            })
-        pserver_program.sync_with_cpp()
-        return pserver_program
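The two placement strategies implemented by the removed helpers survive as `HashName` and `RoundRobin` in `ps_dispatcher`, which the remaining transpiler imports. The toy functions below only illustrate the idea behind each strategy; they are not the `ps_dispatcher` code.

```python
# Illustration only: round-robin vs. name-hash placement of variable blocks.
pserver_endpoints = ["192.168.0.1:6174", "192.168.0.2:6174"]


def round_robin_dispatch(var_names, eps):
    # cycle through endpoints, one block per step
    return [eps[i % len(eps)] for i, _ in enumerate(var_names)]


def hash_dispatch(var_names, eps):
    # a block always lands on the same endpoint, chosen by hashing its name
    return [eps[hash(name) % len(eps)] for name in var_names]


blocks = ["fc_w.block0", "fc_w.block1", "fc_b"]
print(round_robin_dispatch(blocks, pserver_endpoints))
print(hash_dispatch(blocks, pserver_endpoints))
```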
diff --git a/python/paddle/v2/minibatch.py b/python/paddle/v2/minibatch.py
index 317cf037c69f8639e3760fbfce20565127794fcb..d48c54fcbb66487617b1946bc69724870c8f879c 100644
--- a/python/paddle/v2/minibatch.py
+++ b/python/paddle/v2/minibatch.py
@@ -15,7 +15,7 @@
 __all__ = ['batch']
 
 
-def batch(reader, batch_size):
+def batch(reader, batch_size, drop_last=False):
     """
     Create a batched reader.
 
@@ -23,6 +23,8 @@ def batch(reader, batch_size):
     :type reader: callable
     :param batch_size: size of each mini-batch
     :type batch_size: int
+    :param drop_last: whether to drop the last batch if its size is not equal to batch_size.
+    :type drop_last: bool
     :return: the batched reader.
     :rtype: callable
     """
@@ -35,7 +37,7 @@ def batch(reader, batch_size):
             if len(b) == batch_size:
                 yield b
                 b = []
-        if b:
+        if drop_last == False and len(b) != 0:
             yield b
 
     return batch_reader
diff --git a/tools/codestyle/.gitignore b/tools/codestyle/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/codestyle/.gitignore
@@ -0,0 +1 @@
+*.pyc
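A quick sketch of the new `drop_last` flag on `paddle.batch`; the trivial generator below is a stand-in for a real dataset reader.

```python
import paddle.v2 as paddle


def reader():
    # stand-in for a real dataset reader
    for i in range(10):
        yield i


# keep the short final batch (default behavior): [0..3], [4..7], [8, 9]
for mb in paddle.batch(reader, batch_size=4)():
    print(mb)

# drop_last=True discards the incomplete final batch: [0..3], [4..7]
for mb in paddle.batch(reader, batch_size=4, drop_last=True)():
    print(mb)
```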