diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/machine_translation.py index cc31d098328bc237c018ebf8f158bdab5c37bff1..d7a421c10979c3b9d6865a8c0b99a6410e0f46a8 100644 --- a/benchmark/fluid/machine_translation.py +++ b/benchmark/fluid/machine_translation.py @@ -48,6 +48,13 @@ parser.add_argument( type=int, default=16, help="The sequence number of a mini-batch data. (default: %(default)d)") +parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test') +parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') parser.add_argument( "--dict_size", type=int, @@ -72,16 +79,21 @@ parser.add_argument( default=3, help="The width for beam searching. (default: %(default)d)") parser.add_argument( - "--use_gpu", - type=distutils.util.strtobool, - default=True, - help="Whether to use gpu. (default: %(default)d)") + '--device', + type=str, + default='GPU', + choices=['CPU', 'GPU'], + help="The device type.") parser.add_argument( "--max_length", type=int, default=250, help="The maximum length of sequence when doing generation. " "(default: %(default)d)") +parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') def lstm_step(x_t, hidden_t_prev, cell_t_prev, size): @@ -281,7 +293,7 @@ def train(): paddle.dataset.wmt14.test(args.dict_size), buf_size=1000), batch_size=args.batch_size) - place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() + place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) exe = Executor(place) exe.run(framework.default_startup_program()) @@ -307,14 +319,20 @@ def train(): return total_loss / count + iters, num_samples, start_time = 0, 0, time.time() for pass_id in xrange(args.pass_num): - pass_start_time = time.time() - words_seen = 0 + train_accs = [] + train_losses = [] for batch_id, data in enumerate(train_batch_generator()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break src_seq, word_num = to_lodtensor(map(lambda x: x[0], data), place) - words_seen += word_num + num_samples += word_num trg_seq, word_num = to_lodtensor(map(lambda x: x[1], data), place) - words_seen += word_num + num_samples += word_num lbl_seq, _ = to_lodtensor(map(lambda x: x[2], data), place) fetch_outs = exe.run(framework.default_main_program(), @@ -325,24 +343,36 @@ def train(): }, fetch_list=[avg_cost]) - avg_cost_val = np.array(fetch_outs[0]) - print('pass_id=%d, batch_id=%d, train_loss: %f' % - (pass_id, batch_id, avg_cost_val)) + iters += 1 + loss = np.array(fetch_outs[0]) + print( + "Pass = %d, Iter = %d, Loss = %f" % (pass_id, iters, loss) + ) # The accuracy is the accumulation of batches, but not the current batch. 
- pass_end_time = time.time() - test_loss = do_validation() - time_consumed = pass_end_time - pass_start_time - words_per_sec = words_seen / time_consumed - print("pass_id=%d, test_loss: %f, words/s: %f, sec/pass: %f" % - (pass_id, test_loss, words_per_sec, time_consumed)) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + test_loss = do_validation() + exit(0) def infer(): pass +def print_arguments(args): + print('----------- seq2seq Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + if __name__ == '__main__': args = parser.parse_args() + print_arguments(args) if args.infer_only: infer() else: diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py index 7f7afaeb11447d936b65a1d83701b0176ecbc111..43866da9cb113e9d49fc1c51f67da94cbc6bfd8e 100644 --- a/benchmark/fluid/mnist.py +++ b/benchmark/fluid/mnist.py @@ -35,6 +35,12 @@ def parse_args(): parser = argparse.ArgumentParser("mnist model benchmark.") parser.add_argument( '--batch_size', type=int, default=128, help='The minibatch size.') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) parser.add_argument( '--iterations', type=int, default=35, help='The number of minibatches.') parser.add_argument( @@ -53,19 +59,14 @@ def parse_args(): '--use_nvprof', action='store_true', help='If set, use nvprof for CUDA.') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') args = parser.parse_args() return args -def print_arguments(args): - vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and - vars(args)['device'] == 'GPU') - print('----------- Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - def cnn_model(data): conv_pool_1 = fluid.nets.simple_img_conv_pool( input=data, @@ -161,16 +162,22 @@ def run_benchmark(model, args): paddle.dataset.mnist.train(), batch_size=args.batch_size) accuracy = fluid.average.WeightedAverage() + iters, num_samples, start_time = 0, 0, time.time() for pass_id in range(args.pass_num): accuracy.reset() - pass_start = time.time() + train_accs = [] + train_losses = [] for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break img_data = np.array( map(lambda x: x[0].reshape([1, 28, 28]), data)).astype(DTYPE) y_data = np.array(map(lambda x: x[1], data)).astype("int64") y_data = y_data.reshape([len(y_data), 1]) - start = time.time() outs = exe.run( fluid.default_main_program(), feed={"pixel": img_data, @@ -178,21 +185,36 @@ def run_benchmark(model, args): fetch_list=[avg_cost, batch_acc, batch_size_tensor] ) # The accuracy is the accumulation of batches, but not the current batch. 
accuracy.add(value=outs[1], weight=outs[2]) - end = time.time() + iters += 1 + num_samples += len(y_data) loss = np.array(outs[0]) acc = np.array(outs[1]) - print("pass=%d, batch=%d, loss=%f, error=%f, elapse=%f" % - (pass_id, batch_id, loss, 1 - acc, (end - start) / 1000)) + train_losses.append(loss) + train_accs.append(acc) + print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % + (pass_id, iters, loss, acc)) + + print("Pass: %d, Loss: %f, Train Accuray: %f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed - pass_end = time.time() + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + test_avg_acc = eval_test(exe, batch_acc, batch_size_tensor, + inference_program) + exit(0) - train_avg_acc = accuracy.eval() - test_avg_acc = eval_test(exe, batch_acc, batch_size_tensor, - inference_program) - print("pass=%d, train_avg_acc=%f, test_avg_acc=%f, elapse=%f" % - (pass_id, train_avg_acc, test_avg_acc, - (pass_end - pass_start) / 1000)) +def print_arguments(args): + vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and + vars(args)['device'] == 'GPU') + print('----------- mnist Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') if __name__ == '__main__': diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py index f0f1db979fa7fb640679beacafd66dfbe1f62ab8..1af5eaf6b46be47cb6b778cedcf53830c201ef39 100644 --- a/benchmark/fluid/resnet.py +++ b/benchmark/fluid/resnet.py @@ -87,15 +87,6 @@ def parse_args(): return args -def print_arguments(args): - vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and - vars(args)['device'] == 'GPU') - print('----------- Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): conv1 = fluid.layers.conv2d( input=input, @@ -279,32 +270,31 @@ def run_benchmark(model, args): 'label': label}, fetch_list=[avg_cost, batch_acc, batch_size_tensor]) iters += 1 - num_samples += label[0] + num_samples += len(label) accuracy.add(value=acc, weight=weight) train_losses.append(loss) train_accs.append(acc) print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % (pass_id, iters, loss, acc)) - pass_train_acc = accuracy.eval() - # evaluation - if args.with_test: - pass_test_acc = test(exe) - train_elapsed = time.time() - start_time print("Pass: %d, Loss: %f, Train Accuray: %f\n" % (pass_id, np.mean(train_losses), np.mean(train_accs))) - + train_elapsed = time.time() - start_time examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % (num_samples, train_elapsed, examples_per_sec)) + # evaluation + if args.with_test: + pass_test_acc = test(exe) + exit(0) - if args.use_cprof: - pr.disable() - s = StringIO.StringIO() - sortby = 'cumulative' - ps = pstats.Stats(pr, stream=s).sort_stats(sortby) - ps.print_stats() - print(s.getvalue()) + +def print_arguments(args): + vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and + vars(args)['device'] == 'GPU') + print('----------- resnet Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + 
print('%s: %s' % (arg, value)) + print('------------------------------------------------') if __name__ == '__main__': diff --git a/benchmark/fluid/run.sh b/benchmark/fluid/run.sh index 663e2efd5392a6cd1a71f51fa0d017070b489341..f6dfd20bf2ee0b668b6d4238d4511253b2233035 100644 --- a/benchmark/fluid/run.sh +++ b/benchmark/fluid/run.sh @@ -1,7 +1,9 @@ #!/bin/bash # This script benchmarking the PaddlePaddle Fluid on # single thread single GPU. -export CUDNN_PATH=/paddle/cudnn_v5/cuda/lib + +#export FLAGS_fraction_of_gpu_memory_to_use=0.0 +export CUDNN_PATH=/paddle/cudnn_v5 # disable openmp and mkl parallel #https://github.com/PaddlePaddle/Paddle/issues/7199 @@ -25,25 +27,79 @@ export CUDA_VISIBLE_DEVICES=0 export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$CUDNN_PATH:$LD_LIBRARY_PATH +# only query the gpu used +nohup stdbuf -oL nvidia-smi \ + --id=${CUDA_VISIBLE_DEVICES} \ + --query-gpu=timestamp \ + --query-compute-apps=pid,process_name,used_memory \ + --format=csv \ + --filename=mem.log \ + -l 1 & +# mnist +# mnist gpu mnist 128 +FLAGS_benchmark=true stdbuf -oL python fluid/mnist.py \ + --device=GPU \ + --batch_size=128 \ + --skip_batch_num=5 \ + --iterations=500 \ + 2>&1 | tee -a mnist_gpu_128.log # vgg16 -# cifar10 gpu cifar10 128 -FLAGS_benchmark=true python fluid/vgg.py \ +# gpu cifar10 128 +FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ - --iterations=30 \ - 2>&1 > vgg16_gpu_128.log + --iterations=30 \ + 2>&1 | tee -a vgg16_gpu_128.log + +# flowers gpu 128 +FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ + --device=GPU \ + --batch_size=32 \ + --data_set=flowers \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a vgg16_gpu_flowers_32.log # resnet50 # resnet50 gpu cifar10 128 -FLAGS_benchmark=true python fluid/resnet.py \ +FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ --device=GPU \ --batch_size=128 \ --data_set=cifar10 \ --model=resnet_cifar10 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 > resnet50_gpu_128.log + 2>&1 | tee -a resnet50_gpu_128.log + +# resnet50 gpu flowers 64 +FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ + --device=GPU \ + --batch_size=64 \ + --data_set=flowers \ + --model=resnet_imagenet \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a resnet50_gpu_flowers_64.log # lstm +# lstm gpu imdb 32 # tensorflow only support batch=32 +FLAGS_benchmark=true stdbuf -oL python fluid/stacked_dynamic_lstm.py \ + --device=GPU \ + --batch_size=32 \ + --skip_batch_num=5 \ + --iterations=30 \ + --hidden_dim=512 \ + --emb_dim=512 \ + --crop_size=1500 \ + 2>&1 | tee -a lstm_gpu_32.log + +# seq2seq +# seq2seq gpu wmb 128 +FLAGS_benchmark=true stdbuf -oL python fluid/machine_translation.py \ + --device=GPU \ + --batch_size=128 \ + --skip_batch_num=5 \ + --iterations=30 \ + 2>&1 | tee -a lstm_gpu_128.log diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py index 4e063549e0239abf9d946ed8735f0306203509d0..5fcbdd64af9dc196c9d5b2b82ce4213478ea1418 100644 --- a/benchmark/fluid/stacked_dynamic_lstm.py +++ b/benchmark/fluid/stacked_dynamic_lstm.py @@ -37,6 +37,14 @@ def parse_args(): type=int, default=32, help='The sequence number of a batch data. 
(default: %(default)d)') + parser.add_argument( + '--skip_batch_num', + type=int, + default=5, + help='The first num of minibatch num to skip, for better performance test' + ) + parser.add_argument( + '--iterations', type=int, default=80, help='The number of minibatches.') parser.add_argument( '--emb_dim', type=int, @@ -64,6 +72,10 @@ def parse_args(): default=int(os.environ.get('CROP_SIZE', '1500')), help='The max sentence length of input. Since this model use plain RNN,' ' Gradient could be explored if sentence is too long') + parser.add_argument( + '--with_test', + action='store_true', + help='If set, test the testset during training.') args = parser.parse_args() return args @@ -157,37 +169,43 @@ def main(): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - def train_loop(pass_num, crop_size): - with profiler.profiler(args.device, 'total') as prof: - for pass_id in range(pass_num): - train_reader = batch( - paddle.reader.shuffle( - crop_sentence(imdb.train(word_dict), crop_size), - buf_size=25000), - batch_size=args.batch_size) - word_nums = 0 - pass_start_time = time.time() - for batch_id, data in enumerate(train_reader()): - tensor_words = to_lodtensor([x[0] for x in data], place) - for x in data: - word_nums += len(x[0]) - label = numpy.array([x[1] for x in data]).astype("int64") - label = label.reshape((-1, 1)) - loss_np, acc, weight = exe.run( - fluid.default_main_program(), - feed={"words": tensor_words, - "label": label}, - fetch_list=[loss, batch_acc, batch_size_tensor]) - print("pass_id=%d, batch_id=%d, loss=%f, acc=%f" % - (pass_id, batch_id, loss_np, acc)) - - pass_end_time = time.time() - time_consumed = pass_end_time - pass_start_time - words_per_sec = word_nums / time_consumed - print("pass_id=%d, sec/pass: %f, words/s: %f" % - (pass_id, time_consumed, words_per_sec)) - - train_loop(args.pass_num, args.crop_size) + train_reader = batch( + paddle.reader.shuffle( + crop_sentence(imdb.train(word_dict), args.crop_size), + buf_size=25000), + batch_size=args.batch_size) + + iters, num_samples, start_time = 0, 0, time.time() + for pass_id in range(args.pass_num): + train_accs = [] + train_losses = [] + for batch_id, data in enumerate(train_reader()): + if iters == args.skip_batch_num: + start_time = time.time() + num_samples = 0 + if iters == args.iterations: + break + tensor_words = to_lodtensor([x[0] for x in data], place) + label = numpy.array([x[1] for x in data]).astype("int64") + label = label.reshape((-1, 1)) + loss_np, acc, weight = exe.run( + fluid.default_main_program(), + feed={"words": tensor_words, + "label": label}, + fetch_list=[loss, batch_acc, batch_size_tensor]) + iters += 1 + for x in data: + num_samples += len(x[0]) + print( + "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % + (pass_id, iters, loss_np, acc) + ) # The accuracy is the accumulation of batches, but not the current batch. 
+ + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) + exit(0) def to_lodtensor(data, place): @@ -205,5 +223,14 @@ def to_lodtensor(data, place): return res +def print_arguments(args): + print('----------- lstm Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + if __name__ == '__main__': + args = parse_args() + print_arguments(args) main() diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py index 3bf78e4cf08d43127a05c740fa30ca6d2bc416b0..9d990eff62ec368dc7033f55cc0862fa974a64e0 100644 --- a/benchmark/fluid/vgg.py +++ b/benchmark/fluid/vgg.py @@ -191,25 +191,29 @@ def main(): fetch_list=[avg_cost, batch_acc, batch_size_tensor]) accuracy.add(value=acc, weight=weight) iters += 1 - num_samples += len(data) + num_samples += len(y_data) print( "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % (pass_id, iters, loss, acc) ) # The accuracy is the accumulation of batches, but not the current batch. - pass_train_acc = accuracy.eval() + # pass_train_acc = accuracy.eval() train_losses.append(loss) train_accs.append(acc) + print("Pass: %d, Loss: %f, Train Accuray: %f\n" % + (pass_id, np.mean(train_losses), np.mean(train_accs))) + train_elapsed = time.time() - start_time + examples_per_sec = num_samples / train_elapsed + print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % + (num_samples, train_elapsed, examples_per_sec)) # evaluation if args.with_test: pass_test_acc = test(exe) - train_elapsed = time.time() - start_time - print("Pass: %d, Loss: %f, Train Accuray: %f\n" % - (pass_id, np.mean(train_losses), np.mean(train_accs))) + exit(0) def print_arguments(): - print('----------- Configuration Arguments -----------') + print('----------- vgg Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): print('%s: %s' % (arg, value)) print('------------------------------------------------') diff --git a/cmake/generic.cmake b/cmake/generic.cmake index e8bc285bdc95e213b9da2ee388078349a46d2798..c4c9f77df8d57fe162616d2250bd4dfe5b7754e7 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -244,11 +244,11 @@ function(cc_test TARGET_NAME) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) # Support linking flags: --whole-archive (Linux) / -force_load (MacOS) - target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) + target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) if("${cc_test_DEPS}" MATCHES "ARCHIVE_START") list(REMOVE_ITEM cc_test_DEPS ARCHIVE_START ARCHIVE_END) endif() - add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} ${cc_test_ARGS} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) @@ -311,8 +311,8 @@ function(nv_test TARGET_NAME) set(multiValueArgs SRCS DEPS) cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} 
paddle_gtest_main paddle_memory gtest gflags glog) - add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) + target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_test(${TARGET_NAME} ${TARGET_NAME}) endif() endfunction(nv_test) @@ -387,8 +387,8 @@ function(hip_test TARGET_NAME) endif() add_executable(${TARGET_NAME} ${_cmake_options} ${_generated_files} ${_sources}) set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP) - target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) - add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) + add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) add_test(${TARGET_NAME} ${TARGET_NAME}) endif() endfunction(hip_test) diff --git a/doc/design/file_manager/README.md b/doc/design/file_manager/README.md deleted file mode 100644 index 3df10d801e568834729f902aace483d033340e2d..0000000000000000000000000000000000000000 --- a/doc/design/file_manager/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# FileManager设计文档 -## 目标 -在本文档中,我们设计说明了名为FileManager系统,方便用户上传自己的训练数据以进行分布式训练 - -主要功能包括: - -- 提供常用的命令行管理命令管理文件和目录 -- 支持大文件的断点上传、下载 - -## 名词解释 -- PFS:是`Paddlepaddle cloud File System`的缩写,是对用户文件存储空间的抽象,与之相对的是local filesystem。目前我们用CephFS来搭建。 -- [CephFS](http://docs.ceph.com/docs/master/cephfs/):一个POSIX兼容的文件系统。 -- Chunk:逻辑划上文件分块的单位。 - -## 模块 -### 架构图 - - -### PFSClient -- 功能: 详细设计[link](./pfs/pfsclient.md) - - 提供用户管理文件的命令 - - 需要可以跨平台执行 - -- 双向验证 - PFSClient需要和Ingress之间做双向验证[tls](#tls),所以用户需要首先在`cloud.paddlepaddle.org`上注册一下,申请用户空间,并且把系统生成的CA(certificate authority)、Key、CRT(CA signed certificate)下载到本地,然后才能使用PFSClient。 - -### [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- 功能: - 提供七层协议的反向代理、基于粘性会话的负载均衡功能。 - -- 透传用户身份的办法 - Ingress需要把PFSClient的身份信息传给PFSServer,配置的方法参考[link](http://www.integralist.co.uk/posts/clientcertauth.html#3) - -### PFSServer -PFSServer提供RESTful API接口,接收处理PFSClient端的文件管理请求,并且把结果返回PFSClient端。 - -RESTful API - -- /api/v1/files - - `GET /api/v1/files`: Get metadata of files or directories. - - `POST /api/v1/files`: Create files or directories. - - `PATCH /api/v1/files`: Update files or directories. - - `DELETE /api/v1/files`: Delete files or directories. - -- /api/v1/file/chunks - - `GET /api/v1/storage/file/chunks`: Get chunks's metadata of a file. - -- /api/v1/storage/files - - `GET /api/v1/storage/files`: Download files or directories. - - `POST /api/v1/storage/files`: Upload files or directories. - -- /api/v1/storage/file/chunks - - `GET /api/v1/storage/file/chunks`: Download chunks's data. - - `POST /api/v1/storage/file/chunks`: Upload chunks's data. 
- -## 文件传输优化 - -### 分块文件传输 -用户文件可能是比较大的,上传到Cloud或者下载到本地的时间可能比较长,而且在传输的过程中也可能出现网络不稳定的情况。为了应对以上的问题,我们提出了Chunk的概念,一个Chunk由所在的文件偏移、数据、数据长度及校验值组成。文件的上传和下载都是通过对Chunk的操作来实现的。由于Chunk比较小(默认256K),完成一个传输动作完成的时间也比较短,不容易出错。PFSClient需要在传输完毕最后一个Chunk的时候检查destination文件的MD5值是否和source文件一致。 - -一个典型的Chunk如下所示: - -``` -type Chunk struct { - fileOffset int64 - checksum uint32 - len uint32 - data []byte -} -``` - -### 生成sparse文件 -当destination文件不存在或者大小和source文件不一致时,可以用[Fallocate](https://Go.org/pkg/syscall/#Fallocate)生成sparse文件,然后就可以并发写入多个Chunk。 - -### 覆盖不一致的部分 -文件传输的的关键在于需要PFSClient端对比source和destination的文件Chunks的checksum是否保持一致,不一致的由PFSClient下载或者传输Chunk完成。这样已经传输成功的部分就不用重新传输了。 - -## 用户使用流程 -参考[link](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/cluster_train/data_dispatch.md) - -## 框架生成 -用[swagger](https://github.com/swagger-api/swagger-codegen)生成PFSClient和PFSServer的框架部分,以便我们可以把更多的精力放到逻辑本身上。 - -## 参考文档 -- [TLS complete guide](https://github.com/k8sp/tls/blob/master/tls.md) -- [aws.s3](http://docs.aws.amazon.com/cli/latest/reference/s3/) -- [linux man document](https://linux.die.net/man/) diff --git a/doc/design/file_manager/pfs/pfsclient.md b/doc/design/file_manager/pfs/pfsclient.md deleted file mode 100644 index 56bc70c54bbc92b78d66e04fb495b1300cf8ebe0..0000000000000000000000000000000000000000 --- a/doc/design/file_manager/pfs/pfsclient.md +++ /dev/null @@ -1,129 +0,0 @@ -# PFSClient - -## Description -The `pfs` command is a Command Line Interface to manage your files on PaddlePaddle Cloud - -## Synopsis -``` -paddle [options] pfs [parameters] -``` - -## Options -``` ---profile (string) - Use a specific profile from your credential file. - ---help (string) - Display more information about command - ---version - Output version information and exit - ---debug - Show detailed debugging log - ---only-show-errors (boolean) - Only errors and warnings are displayed. All other output is suppressed. -``` - -## Path Arguments -When using a command, we need to specify path arguments. There are two path argument type: `localpath` and `pfspath`. - -A `pfspath` begin with `/pfs`, eg: `/pfs/$DATACENTER/home/$USER/folder`. - -[Here](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/cluster_train/data_dispatch.md#上传训练文件) is how to config datacenters. - -## order of Path Arguments -Commonly, if there are two path arguments, the first is the source, and the second is the destination. - -## Subcommonds -- rm - remove files or directories - -``` -Synopsis: - rm [-r] [-v] ... - -Options: - -r - Remove directories and their contents recursively - -v - Cause rm to be verbose, showing files after they are removed. - -Examples: - paddle pfs rm /pfs/$DATACENTER/home/$USER/file - paddle pfs rm -r /pfs/$DATACENTER/home/$USER/folder -``` -- mv - move (rename) files - -``` -Synopsis: - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - mv [-f | -n] [-v] - mv [-f | -n] [-v] ... - -Options: - -f - Do not prompt for confirmation before overwriting the destination path. (The -f option overrides previous -n options.) - -n - Do not overwrite an existing file. (The -n option overrides previous -f options.) - -v - Cause mv to be verbose, showing files after they are moved. - -Examples: - paddle pfs mv ./text1.txt /pfs/$DATACENTER/home/$USER/text1.txt -``` -- cp - copy files or directories - -``` -Synopsis: - cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... 
- cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... - cp [-r] [-f | -n] [-v] [--preserve--links] - cp [-r] [-f | -n] [-v] [--preserve--links] ... - -Options: - -r - Copy directories recursively - -f - Do not prompt for confirmation before overwriting the destination path. (The -f option overrides previous -n options.) - -n - Do not overwrite an existing file. (The -n option overrides previous -f options.) - -v - Cause cp to be verbose, showing files after they are copied. - --preserve--links - Reserve links when copy links - -Examples: - paddle pfs cp ./file /pfs/$DATACENTER/home/$USER/file - paddle pfs cp /pfs/$DATACENTER/home/$USER/file ./file -``` -- ls- list files - -``` -Synopsis: - ls [-r] ... - -Options: - -R - List directory(ies) recursively - -Examples: - paddle pfs ls /pfs/$DATACENTER/home/$USER/file - paddle pfs ls /pfs/$DATACENTER/home/$USER/folder -``` - -- mkdir - mkdir directory(ies) -Create intermediate directory(ies) as required. - -``` -Synopsis: - mkdir ... - -Examples: - paddle pfs mkdir /pfs/$DATACENTER/home/$USER/folder -``` diff --git a/doc/design/file_manager/src/filemanager.graffle b/doc/design/file_manager/src/filemanager.graffle deleted file mode 100644 index 7861a33072bc1908f69d12b37c20491dd8663103..0000000000000000000000000000000000000000 Binary files a/doc/design/file_manager/src/filemanager.graffle and /dev/null differ diff --git a/doc/design/file_manager/src/filemanager.png b/doc/design/file_manager/src/filemanager.png deleted file mode 100644 index 8139a19f5722f56d3c211f3ab0d3982f751134b9..0000000000000000000000000000000000000000 Binary files a/doc/design/file_manager/src/filemanager.png and /dev/null differ diff --git a/doc/fluid/dev/index_cn.rst b/doc/fluid/dev/index_cn.rst index f627437f354a12c79cad25c959409db29ecbd874..b123b756e2251c38f319e1aefa2cb04fd7a36b03 100644 --- a/doc/fluid/dev/index_cn.rst +++ b/doc/fluid/dev/index_cn.rst @@ -9,5 +9,5 @@ use_eigen_cn.md name_convention.md support_new_device.md - releasing_process.md + releasing_process_cn.md op_markdown_format.md diff --git a/doc/fluid/dev/index_en.rst b/doc/fluid/dev/index_en.rst index 0b65fed67ad45eb399b624184485a99a082d79e9..98988fc22dcedecdbcd67fb3bf761377bf046337 100644 --- a/doc/fluid/dev/index_en.rst +++ b/doc/fluid/dev/index_en.rst @@ -9,5 +9,5 @@ Development use_eigen_en.md name_convention.md support_new_device.md - releasing_process.md + releasing_process_en.md op_markdown_format.md diff --git a/doc/fluid/dev/releasing_process.md b/doc/fluid/dev/releasing_process_cn.md similarity index 74% rename from doc/fluid/dev/releasing_process.md rename to doc/fluid/dev/releasing_process_cn.md index c5943ccd81c2ae2aaacd2676da12509db889f54a..4c6728fba7150b0f1e180e57590f18a5b677c70d 100644 --- a/doc/fluid/dev/releasing_process.md +++ b/doc/fluid/dev/releasing_process_cn.md @@ -10,19 +10,10 @@ PaddlePaddle每次发新的版本,遵循以下流程: * 使用Regression Test List作为检查列表,测试本次release的正确性。 * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有bug后,Patch号加一,到第二步 * 修改`python/setup.py.in`中的版本信息,并将`istaged`字段设为`True`。 - * 编译这个版本的python wheel包,并发布到pypi。 - * 由于pypi.python.org目前遵循[严格的命名规范PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 - * pypi上的package名称为paddlepaddle和paddlepaddle_gpu,如果要上传GPU版本的包,需要修改build/python/setup.py中,name: "paddlepaddle_gpu"并重新打包wheel包:`python setup.py bdist_wheel`。 - * 上传方法: - ``` - cd build/python - pip install twine - twine upload dist/[package to upload] - ``` - * 
编译这个版本的Docker发行镜像,发布到dockerhub。如果失败,修复Docker编译镜像问题,Patch号加一,返回第二步 -1. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 -1. 协同完成Release Note的书写 - + * 将这个版本的python wheel包发布到pypi。 + * 更新Docker镜像(参考后面的操作细节)。 +1. 第三步完成后,将`release/版本号`分支合入master分支,将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。 +1. 协同完成Release Note的书写。 需要注意的是: @@ -31,13 +22,18 @@ PaddlePaddle每次发新的版本,遵循以下流程: ## 发布wheel包到pypi -使用[PaddlePaddle CI](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) +1. 使用[PaddlePaddle CI](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) 完成自动化二进制编译,参考下图,选择需要发布的版本(通常包含一个CPU版本和一个GPU版本),点击"run"右侧的"..."按钮,可以 -弹出下面的选择框,在第二个tab (Changes)里选择需要发布的分支,这里选择0.11.0,然后点击"Run Build"按钮。等待编译完成后 -可以在此页面的"Artifacts"下拉框中找到生成的3个二进制文件,分别对应CAPI,`cp27m`和`cp27mu`的版本。然后按照上述的方法 -使用`twine`工具上传即可。 - - +弹出下面的选择框,在第二个tab (Changes)里选择需要发布的分支,这里选择0.11.0,然后点击"Run Build"按钮。 + +1. 等待编译完成后可以在此页面的"Artifacts"下拉框中找到生成的3个二进制文件,分别对应CAPI,`cp27m`和`cp27mu`的版本。 +1. 由于pypi.python.org目前遵循[严格的命名规范PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 +1. 上传: +``` +cd build/python +pip install twine +twine upload dist/[package to upload] +``` * 注:CI环境使用 https://github.com/PaddlePaddle/buildtools 这里的DockerImage作为编译环境以支持更多的Linux 发型版,如果需要手动编译,也可以使用这些镜像。这些镜像也可以从 https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/ 下载得到。 @@ -48,10 +44,20 @@ PaddlePaddle每次发新的版本,遵循以下流程: 上述PaddlePaddle CI编译wheel完成后会自动将Docker镜像push到DockerHub,所以,发布Docker镜像只需要对自动push的镜像打上 版本号对应的tag即可: -1. 进入 https://hub.docker.com/r/paddlepaddle/paddle/tags/ 查看latest tag的更新时间是否在上述编译wheel包完成后是否最新。 -1. 执行 `docker pull paddlepaddle/paddle:[latest tag]`,latest tag可以是latest或latest-gpu等。 -1. 执行 `docker tag paddlepaddle/paddle:[latest tag] paddlepaddle/paddle:[version]` -1. 执行 `docker push paddlepaddle/paddle:[version]` +``` +docker pull [镜像]:latest +docker tag [镜像]:latest [镜像]:[version] +docker push [镜像]:[version] +``` + +需要更新的镜像tag包括: + +* `[version]`: CPU版本 +* `[version]-openblas`: openblas版本 +* `[version]-gpu`: GPU版本(CUDA 8.0 cudnn 5) +* `[version]-gpu-[cudaver]-[cudnnver]`: 不同cuda, cudnn版本的镜像 + +之后可进入 https://hub.docker.com/r/paddlepaddle/paddle/tags/ 查看是否发布成功。 ## PaddlePaddle 分支规范 @@ -76,7 +82,7 @@ PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git- ### PaddlePaddle Book中所有章节 -PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。 +PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练(V2和Fluid)模型正确性。 diff --git a/doc/fluid/dev/releasing_process_en.md b/doc/fluid/dev/releasing_process_en.md new file mode 100644 index 0000000000000000000000000000000000000000..f989b964d6d1a329bbe31adc7ec10db017acaefa --- /dev/null +++ b/doc/fluid/dev/releasing_process_en.md @@ -0,0 +1,210 @@ +# PaddlePaddle Releasing Process + +PaddlePaddle manages its branches using "git-flow branching model", and [Semantic Versioning](http://semver.org/) as it's version number semantics. + +Each time we release a new PaddlePaddle version, we should follow the below steps: + +1. Fork a new branch from `develop` named `release/[version]`, e.g. `release/0.10.0`. +1. Push a new tag on the release branch, the tag name should be like `[version]rc.patch`. The + first tag should be `0.10.0rc1`, and the second should be `0.10.0.rc2` and so on. +1. 
After that, we should do: + * Run all regression test on the Regression Test List (see PaddlePaddle TeamCity CI), to confirm + that this release has no major bugs. + * If regression test fails, we must fix those bugs and create a new `release/[version]` + branch from previous release branch. + * Modify `python/setup.py.in`, change the version number and change `ISTAGED` to `True`. + * Publish PaddlePaddle release wheel packages to pypi (see below instructions for detail). + * Update the Docker images (see below instructions for detail). +1. After above step, merge `release/[version]` branch to master and push a tag on the master commit, + then merge `master` to `develop`. +1. Update the Release Note. + +***NOTE:*** + +* Do ***NOT*** merge commits from develop branch to release branches to keep the release branch contain + features only for current release, so that we can test on that version. +* If we want to fix bugs on release branches, we must merge the fix to master, develop and release branch. + +## Publish Wheel Packages to pypi + +1. Use our [CI tool](https://paddleci.ngrok.io/project.html?projectId=Manylinux1&tab=projectOverview) + to build all wheel packages needed to publish. As shown in the following picture, choose a build + version, click "..." button on the right side of "Run" button, and switch to the second tab in the +pop-up box, choose the current release branch and click "Run Build" button. You may repeat this + step to start different versions of builds. + +1. After the build succeeds, download the outputs under "Artifacts" including capi, `cp27m` and `cp27mu`. +1. Since pypi.python.org follows [PEP 513](https://www.python.org/dev/peps/pep-0513), before we + upload the package using `twine`, we need to rename the package from `linux_x86_64` to + `manylinux1_x86_64`. +1. Start the upload: + ``` + cd build/python + pip install twine + twine upload dist/[package to upload] + ``` + +* NOTE: We use a special Docker image to build our releases to support more Linux distributions, you can + download it from https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/, or build it using + scripts under `tools/manylinux1`. +* pypi does not allow overwrite the already uploaded version of wheel package, even if you delete the + old version. you must change the version number before upload a new one. + +## Publish Docker Images + +Our CI tool will push latest images to DockerHub, so we only need to push a version tag like: + +``` +docker pull [image]:latest +docker tag [image]:latest [image]:[version] +docker push [image]:[version] +``` + +Tags that need to be updated are: +* `[version]`: CPU only version image +* `[version]-openblas`: openblas version image +* `[version]-gpu`: GPU version(using CUDA 8.0 cudnn 5) +* `[version]-gpu-[cudaver]-[cudnnver]`: tag for different cuda, cudnn versions + +You can then checkout the latest pushed tags at https://hub.docker.com/r/paddlepaddle/paddle/tags/. + +## Branching Model + +We use [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) as our branching model, +with some modifications: + +* `master` branch is the stable branch. Each version on the master branch is tested and guaranteed. +* `develop` branch is for development. Each commit on develop branch has passed CI unit test, but no + regression tests are run. +* `release/[version]` branch is used to publish each release. Latest release version branches have + bugfix only for that version, but no feature updates. 
+* Developer forks are not required to follow + [git-flow](http://nvie.com/posts/a-successful-git-branching-model/) + branching model, all forks is like a feature branch. + * Advise: developer fork's develop branch is used to sync up with main repo's develop branch. + * Advise: developer use it's fork's develop branch to for new branch to start developing. + * Use that branch on developer's fork to create pull requests and start reviews. + * developer can push new commits to that branch when the pull request is open. +* Bug fixes are also started from developers forked repo. And, bug fixes branch can merge to + `master`, `develop` and `releases`. + +## PaddlePaddle Regression Test List + +### All Chapters of PaddlePaddle Book + +We need to guarantee that all the chapters of PaddlePaddle Book can run correctly. Including +V1 (`paddle_trainer` training) and V2 training and Fluid training. + +
+<table>
+<thead>
+<tr>
+<th></th>
+<th>Linear Regression</th>
+<th>Recognize Digits</th>
+<th>Image Classification</th>
+<th>Word2Vec</th>
+<th>Personalized Recommendation</th>
+<th>Sentiment Analysis</th>
+<th>Semantic Role Labeling</th>
+<th>Machine Translation</th>
+</tr>
+</thead>
+<tbody>
+<tr><td>API.V2 + Docker + GPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>API.V2 + Docker + CPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>`paddle_trainer` + Docker + GPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>`paddle_trainer` + Docker + CPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>API.V2 + Ubuntu + GPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>API.V2 + Ubuntu + CPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>`paddle_trainer` + Ubuntu + GPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+<tr><td>`paddle_trainer` + Ubuntu + CPU</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr>
+</tbody>
+</table>
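For reference, the wheel publishing steps in the `releasing_process_en.md` file added above amount to renaming the wheel's platform tag and uploading it with `twine`. A minimal sketch, assuming an illustrative wheel filename (the real version and ABI tags come from the CI artifacts):

```bash
cd build/python
# PEP 513: pypi only accepts manylinux1 platform tags, so rename the wheel
# before uploading (the filename below is illustrative; use the CI artifact).
mv dist/paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl \
   dist/paddlepaddle-0.11.0-cp27-cp27mu-manylinux1_x86_64.whl
pip install twine
twine upload dist/paddlepaddle-0.11.0-cp27-cp27mu-manylinux1_x86_64.whl
```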
diff --git a/paddle/.gitignore b/paddle/.gitignore index f921eef14156a97e4fd250f014960e306b43f35a..1c1c0c2c829f088d7e3f52ca007fcb8f33a16a36 100644 --- a/paddle/.gitignore +++ b/paddle/.gitignore @@ -1,3 +1,4 @@ +.timestamp *.o *.a .svn diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index a473ed7400012b7d0cbc5ab9bed263b3cca8c6ec..3840bbe83b68dc2a49aa73feb57a80e9992cad5f 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -7,9 +7,9 @@ cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) nv_test(dim_test SRCS dim_test.cu DEPS ddim) if(WITH_GPU) - nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place paddle_memory device_context framework_proto) + nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place memory device_context framework_proto) else() - cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place paddle_memory device_context framework_proto) + cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place memory device_context framework_proto) endif() cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) @@ -21,9 +21,9 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) -nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place paddle_memory device_context init) +nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context init) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio) -cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) +cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 16a118090ba9cfd50b4b03484983f9fc73cf7973..c2ca1bbc78f3ebc6066df6b666720af0d1fbbf59 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -93,6 +93,43 @@ static void CheckTensorNANOrInf(const std::string& name, "Tensor %s contains NAN", name); } +void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope, + int block_id) { + auto& global_block = pdesc.Block(block_id); + + const Scope* ancestor_scope = scope; + while (ancestor_scope->parent()) { + ancestor_scope = ancestor_scope->parent(); + } + + if (ancestor_scope != scope) { + for (auto& var : global_block.AllVars()) { + if (var->Name() == framework::kEmptyVarName) { + continue; + } + + if (var->Persistable()) { + auto* ptr = const_cast(ancestor_scope)->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " global, which pointer is " << ptr; + } else { + auto* ptr = scope->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " locally, which pointer is " << ptr; + } + } + } else { + for (auto& var : global_block.AllVars()) { + auto* ptr = scope->Var(var->Name()); + InitializeVariable(ptr, var->GetType()); + VLOG(3) << "Create variable " << var->Name() << ", which pointer is " + << ptr; + } + } +} + void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, bool create_local_scope, bool create_vars) { platform::RecordBlock b(block_id); @@ -184,8 +221,8 @@ static bool has_fetch_operators( void Executor::Run(const ProgramDesc& program, Scope* scope, std::map& feed_targets, std::map& fetch_targets, - const std::string& 
feed_holder_name, - const std::string& fetch_holder_name, bool create_vars) { + bool create_vars, const std::string& feed_holder_name, + const std::string& fetch_holder_name) { platform::RecordBlock b(kProgramId); bool has_feed_ops = has_feed_operators(program.Block(0), feed_targets, feed_holder_name); @@ -296,38 +333,13 @@ std::vector> Executor::Prepare( void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope, bool create_vars) { - auto& block = ctx->prog_.Block(ctx->block_id_); - Scope* local_scope = scope; if (create_vars) { if (create_local_scope) { local_scope = &scope->NewScope(); - for (auto& var : block.AllVars()) { - if (var->Name() == framework::kEmptyVarName) { - continue; - } - - if (var->Persistable()) { - auto* ptr = scope->Var(var->Name()); - InitializeVariable(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " global, which pointer is " << ptr; - } else { - auto* ptr = local_scope->Var(var->Name()); - InitializeVariable(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " locally, which pointer is " << ptr; - } - } - } else { - for (auto& var : block.AllVars()) { - auto* ptr = local_scope->Var(var->Name()); - InitializeVariable(ptr, var->GetType()); - VLOG(3) << "Create variable " << var->Name() << ", which pointer is " - << ptr; - } - } // if (create_local_scope) - } // if (create_vars) + } + CreateVariables(ctx->prog_, local_scope, ctx->block_id_); + } for (auto& op : ctx->ops_) { VLOG(3) << place_ << " " << op->DebugStringEx(local_scope); diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index d7c99165f0c9d3b1ae11a3b4753a61e8118f7b52..75b29b2f4065ad75b62a134b890b8f9f6730fdc7 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -54,9 +54,9 @@ class Executor { void Run(const ProgramDesc& program, Scope* scope, std::map& feed_targets, std::map& fetch_targets, + bool create_vars = true, const std::string& feed_holder_name = "feed", - const std::string& fetch_holder_name = "fetch", - bool create_vars = true); + const std::string& fetch_holder_name = "fetch"); static std::unique_ptr Prepare( const ProgramDesc& program, int block_id); @@ -64,6 +64,8 @@ class Executor { static std::vector> Prepare( const ProgramDesc& program, const std::vector& block_ids); + void CreateVariables(const ProgramDesc& pdesc, Scope* scope, int block_id); + void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope = true, bool create_vars = true); diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 7be93fa6002ae93c3e1b75c8f7fe5ca5f40b271f..74945fb4f2f745b6ca9c48adb0c8b9e6ae1e94a4 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/parallel_executor.h" -#include "paddle/fluid/platform/profiler.h" #include #include @@ -24,6 +23,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/details/multi_devices_graph_builder.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { @@ -43,30 +43,40 @@ class ParallelExecutorPrivate { #endif }; +std::vector &ParallelExecutor::GetLocalScopes() { + return member_->local_scopes_; +} + ParallelExecutor::ParallelExecutor( size_t num_threads, bool use_event, const std::vector &places, const std::unordered_set ¶ms, - const ProgramDesc &startup_program, const ProgramDesc &main_program, - const std::string &loss_var_name, Scope *scope, bool allow_op_delay) + const std::unordered_set &bcast_vars, + const ProgramDesc &main_program, const std::string &loss_var_name, + Scope *scope, const std::vector &local_scopes, bool allow_op_delay) : member_(new ParallelExecutorPrivate(places)) { member_->global_scope_ = scope; - // Step 1. RunStartupProgram and Bcast the params to devs. - Executor exe(places[0]); - exe.Run(startup_program, scope, 0); + // Step 1. Bcast the params to devs. // Create local scopes - for (size_t i = 0; i < member_->places_.size(); ++i) { - member_->local_scopes_.push_back(&scope->NewScope()); + if (local_scopes.empty()) { + for (size_t i = 0; i < member_->places_.size(); ++i) { + member_->local_scopes_.push_back(&scope->NewScope()); + } + } else { + PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size()); + for (size_t i = 0; i < member_->places_.size(); ++i) { + member_->local_scopes_.push_back(local_scopes[i]); + } } // Bcast Parameters to all GPUs #ifdef PADDLE_WITH_CUDA member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_)); #endif - if (platform::is_gpu_place(places[0]) && - member_->local_scopes_.size() != 1) { // Is CUDA - BCastParamsToGPUs(startup_program); + if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 && + local_scopes.empty()) { // Is CUDA + BCastParamsToGPUs(bcast_vars); } // Startup Program has been run. All local scopes has correct parameters. 
@@ -99,48 +109,47 @@ ParallelExecutor::ParallelExecutor( } void ParallelExecutor::BCastParamsToGPUs( - const ProgramDesc &startup_program) const { + const std::unordered_set &vars) const { #ifdef PADDLE_WITH_CUDA auto *main_scope = member_->local_scopes_[0]; - for (auto *var_desc : startup_program.Block(0).AllVars()) { - size_t idx = var_desc->Name().find("@GRAD"); - if (idx != std::string::npos) continue; - if (var_desc->GetType() == proto::VarType::LOD_TENSOR) { - auto &main_tensor = - main_scope->FindVar(var_desc->Name())->Get(); - - auto &dims = main_tensor.dims(); - - if (paddle::platform::is_gpu_place(main_tensor.place())) { - size_t numel = main_tensor.numel(); - ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type()); - platform::NCCLGroupGuard guard; - for (size_t i = 0; i < member_->places_.size(); ++i) { - auto place = member_->places_[i]; - void *buffer; - if (i == 0) { - buffer = const_cast(main_tensor.data()); - } else { - auto local_scope = member_->local_scopes_[i]; - auto *t = - local_scope->Var(var_desc->Name())->GetMutable(); - t->Resize(dims); - buffer = t->mutable_data(place, main_tensor.type()); - } - auto &nccl_ctx = member_->nccl_ctxs_->at(place); - platform::dynload::ncclBcast(buffer, numel, data_type, 0, - nccl_ctx.comm_, nccl_ctx.stream()); - } - } else { - platform::CPUPlace cpu; - for (size_t i = 1; i < member_->places_.size(); ++i) { + for (auto &var : vars) { + auto *main_var = main_scope->FindVar(var); + if (!main_var->IsType()) { + continue; + } + + auto &main_tensor = main_var->Get(); + + auto &dims = main_tensor.dims(); + + if (paddle::platform::is_gpu_place(main_tensor.place())) { + size_t numel = main_tensor.numel(); + ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type()); + platform::NCCLGroupGuard guard; + for (size_t i = 0; i < member_->places_.size(); ++i) { + auto place = member_->places_[i]; + void *buffer; + if (i == 0) { + buffer = const_cast(main_tensor.data()); + } else { auto local_scope = member_->local_scopes_[i]; - auto *t = local_scope->Var(var_desc->Name())->GetMutable(); + auto *t = local_scope->Var(var)->GetMutable(); t->Resize(dims); - t->mutable_data(cpu, main_tensor.type()); - paddle::framework::TensorCopy(main_tensor, cpu, t); + buffer = t->mutable_data(place, main_tensor.type()); } + auto &nccl_ctx = member_->nccl_ctxs_->at(place); + platform::dynload::ncclBcast(buffer, numel, data_type, 0, + nccl_ctx.comm_, nccl_ctx.stream()); + } + } else { + platform::CPUPlace cpu; + for (size_t i = 1; i < member_->places_.size(); ++i) { + auto local_scope = member_->local_scopes_[i]; + auto *t = local_scope->Var(var)->GetMutable(); + t->Resize(dims); + t->mutable_data(cpu, main_tensor.type()); + paddle::framework::TensorCopy(main_tensor, cpu, t); } } member_->nccl_ctxs_->WaitAll(); diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index c7c58b2b808383621a6d492f9188b0d36bfa6858..c048c3865f14822be4a0015e385ea1b8e05d0ced 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -36,11 +36,14 @@ class ParallelExecutor { explicit ParallelExecutor(size_t num_threads, bool use_event, const std::vector& places, const std::unordered_set& params, - const ProgramDesc& startup_program, + const std::unordered_set& bcast_vars, const ProgramDesc& main_program, const std::string& loss_var_name, Scope* scope, + const std::vector& local_scopes, bool allow_op_delay); + std::vector& GetLocalScopes(); + void Run(const std::vector& 
fetch_tensors, const std::string& fetched_var_name, const std::unordered_map& feed_tensors); @@ -51,7 +54,7 @@ class ParallelExecutor { ParallelExecutorPrivate* member_; - void BCastParamsToGPUs(const ProgramDesc& startup_program) const; + void BCastParamsToGPUs(const std::unordered_set& vars) const; }; } // namespace framework diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index 17e38b1cf042657834b4d0d1c12cbbb92f19fa45..194df3e4a8b50700e2be01ce5ebca83b92501fb8 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -15,7 +15,6 @@ limitations under the License. */ #include "paddle/fluid/framework/scope.h" #include // for unique_ptr -#include // for call_once #include #include "glog/logging.h" #include "paddle/fluid/framework/threadpool.h" @@ -39,6 +38,7 @@ Scope::~Scope() { } Scope& Scope::NewScope() const { + std::unique_lock lock(mutex_); kids_.push_back(new Scope(this)); return *kids_.back(); } @@ -92,6 +92,7 @@ std::vector Scope::LocalVarNames() const { } void Scope::DeleteScope(Scope* scope) { + std::unique_lock lock(mutex_); auto it = std::find(this->kids_.begin(), this->kids_.end(), scope); PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope); this->kids_.erase(it); @@ -103,7 +104,7 @@ void Scope::DeleteScope(Scope* scope) { } } -void Scope::EraseVars(std::vector& var_names) { +void Scope::EraseVars(const std::vector& var_names) { std::set var_set(var_names.begin(), var_names.end()); for (auto it = vars_.begin(); it != vars_.end();) { if (var_set.find(it->first) != var_set.end()) { diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h index c1e1f49caaa5a60df0e97289aada465b45213971..c8cb70549f1d131b66fa7c6eeb35f3b7151a9e7f 100644 --- a/paddle/fluid/framework/scope.h +++ b/paddle/fluid/framework/scope.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include // NOLINT #include #include #include @@ -51,13 +52,13 @@ class Scope { /// Create a variable with a scope-unique name. Variable* Var(std::string* name = nullptr); - void EraseVars(std::vector& var_names); + void EraseVars(const std::vector& var_names); /// Find a variable in the scope or any of its ancestors. Returns /// nullptr if cannot find. Variable* FindVar(const std::string& name) const; - const Scope& parent() const { return *parent_; } + const Scope* parent() const { return parent_; } /// Find the scope or an ancestor scope that contains the given variable. 
const Scope* FindScope(const Variable* var) const; @@ -88,6 +89,9 @@ class Scope { Scope const* parent_{nullptr}; DISABLE_COPY_AND_ASSIGN(Scope); + + private: + mutable std::mutex mutex_; }; } // namespace framework } // namespace paddle diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index aff427310f15be72f5c8d0fa1537ffa6bbe2881d..f417f62f3f75360f4ae1b7795608ae95200cfeb8 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -1,4 +1,4 @@ -set(FLUID_CORE_MODULES proto_desc paddle_memory lod_tensor executor prune init) +set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor prune init) cc_library(paddle_fluid_api SRCS io.cc diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index a6b6c3f828f4c6f59fca42e4c3d9580d6c136524..ca2077d07411d2cd6095e0dc2a874af0890145c5 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -46,8 +46,8 @@ TEST(inference, image_classification) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: ---"; - TestInference(dirname, cpu_feeds, cpu_fetchs1, - FLAGS_repeat); + TestInference(dirname, cpu_feeds, + cpu_fetchs1, FLAGS_repeat); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -57,8 +57,8 @@ TEST(inference, image_classification) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: ---"; - TestInference(dirname, cpu_feeds, cpu_fetchs2, - FLAGS_repeat); + TestInference(dirname, cpu_feeds, + cpu_fetchs2, FLAGS_repeat); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 5118e66f1e7a36333ff4425361e54ec59e6ba05b..aae34ceda07fea6e881cf61b3755ec45d1d6f2dc 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -88,7 +88,7 @@ void CheckError(const paddle::framework::LoDTensor& output1, EXPECT_EQ(count, 0U) << "There are " << count << " different elements."; } -template +template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, const std::vector& cpu_fetchs, @@ -166,8 +166,16 @@ void TestInference(const std::string& dirname, // 6. Run the inference program { + if (!CreateVars) { + // If users don't want to create and destroy variables every time they + // run, they need to set `create_vars` to false and manually call + // `CreateVariables` before running. 
+ executor.CreateVariables(*inference_program, scope, 0); + } + // Ignore the profiling results of the first run - executor.Run(*inference_program, scope, feed_targets, fetch_targets); + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); // Enable the profiler paddle::platform::EnableProfiler(state); @@ -178,7 +186,8 @@ void TestInference(const std::string& dirname, "run_inference", paddle::platform::DeviceContextPool::Instance().Get(place)); - executor.Run(*inference_program, scope, feed_targets, fetch_targets); + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); } // Disable the profiler and print the timing information diff --git a/paddle/fluid/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt index 8b3043af7a18787a08583d47b76da679ccb63740..709fc7e12e1db537ceece30c405c0e8a2582e8ca 100644 --- a/paddle/fluid/memory/CMakeLists.txt +++ b/paddle/fluid/memory/CMakeLists.txt @@ -1,20 +1,15 @@ add_subdirectory(detail) -cc_library(memory SRCS memory.cc DEPS place enforce) +cc_library(malloc SRCS malloc.cc DEPS buddy_allocator place enforce) cc_library(memcpy SRCS memcpy.cc DEPS place) -cc_library(paddle_memory +cc_library(memory DEPS - memory - memcpy - meta_data - meta_cache - memory_block - buddy_allocator - system_allocator) + malloc + memcpy) -cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) +cc_test(malloc_test SRCS malloc_test.cc DEPS malloc) #if (WITH_GPU) -# nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place paddle_memory) +# nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place memory) #endif() diff --git a/paddle/fluid/memory/detail/CMakeLists.txt b/paddle/fluid/memory/detail/CMakeLists.txt index b9c3fc31c1523abf3acbd116745bbf1596454aac..c725dba5e98c200c2542d97cb8f53a938f6b614a 100644 --- a/paddle/fluid/memory/detail/CMakeLists.txt +++ b/paddle/fluid/memory/detail/CMakeLists.txt @@ -1,3 +1,5 @@ +cc_library(memory_block SRCS memory_block.cc memory_block_desc.cc meta_cache.cc) + if(${WITH_GPU}) nv_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info gpu_info) else(${WITH_GPU}) @@ -6,10 +8,4 @@ endif(${WITH_GPU}) cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator) -cc_library(meta_data SRCS meta_data.cc) - -cc_library(meta_cache SRCS meta_cache.cc) - -cc_library(memory_block SRCS memory_block.cc) - -cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS glog) +cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS memory_block system_allocator glog) diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 876837838648d6733b104a5496454f5dc58bbb71..4194ba197948b47003863196efdac1c08a7ae4f6 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -46,7 +46,8 @@ inline size_t align(size_t size, size_t alignment) { void* BuddyAllocator::Alloc(size_t unaligned_size) { // adjust allocation alignment - size_t size = align(unaligned_size + sizeof(Metadata), min_chunk_size_); + size_t size = + align(unaligned_size + sizeof(MemoryBlock::Desc), min_chunk_size_); // acquire the allocator lock std::lock_guard lock(mutex_); @@ -103,7 +104,7 @@ void BuddyAllocator::Free(void* p) { return; } - block->mark_as_free(cache_); + block->mark_as_free(&cache_); total_used_ -= block->total_size(cache_); total_free_ += block->total_size(cache_); @@ -122,7 +123,7 @@ void BuddyAllocator::Free(void* p) { right_buddy)); // merge its right 
buddy to the block - block->merge(cache_, right_buddy); + block->merge(&cache_, right_buddy); } } @@ -139,7 +140,7 @@ void BuddyAllocator::Free(void* p) { left_buddy->total_size(cache_), left_buddy)); // merge the block to its left buddy - left_buddy->merge(cache_, block); + left_buddy->merge(&cache_, block); block = left_buddy; } } @@ -163,13 +164,13 @@ size_t BuddyAllocator::Used() { return total_used_; } void* BuddyAllocator::SystemAlloc(size_t size) { size_t index = 0; - void* p = system_allocator_->Alloc(index, size); + void* p = system_allocator_->Alloc(&index, size); VLOG(10) << "Allocated " << p << " from system allocator."; if (p == nullptr) return nullptr; - static_cast(p)->init(cache_, MemoryBlock::HUGE_CHUNK, index, + static_cast(p)->init(&cache_, MemoryBlock::HUGE_CHUNK, index, size, nullptr, nullptr); return static_cast(p)->data(); @@ -187,14 +188,14 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { // Allocate a new maximum sized block size_t index = 0; - void* p = system_allocator_->Alloc(index, max_chunk_size_); + void* p = system_allocator_->Alloc(&index, max_chunk_size_); if (p == nullptr) return pool_.end(); VLOG(10) << "Creating and inserting new block " << p << " from system allocator"; - static_cast(p)->init(cache_, MemoryBlock::FREE_CHUNK, index, + static_cast(p)->init(&cache_, MemoryBlock::FREE_CHUNK, index, max_chunk_size_, nullptr, nullptr); // gpu fallback allocation @@ -238,11 +239,11 @@ void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it, VLOG(10) << "Split block (" << block << ", " << block->total_size(cache_) << ") into"; - block->split(cache_, size); + block->split(&cache_, size); VLOG(10) << "Left block (" << block << ", " << block->total_size(cache_) << ")"; - block->set_type(cache_, MemoryBlock::ARENA_CHUNK); + block->set_type(&cache_, MemoryBlock::ARENA_CHUNK); // the rest of memory if exist if (block->has_right_buddy(cache_)) { diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index a4ee70c2586f37e3b2328dedfe28135e14d8b18d..2f39d774d6fb6a2bc37877eb2f8b90bebd3cda28 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -14,18 +14,18 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" +#include // NOLINT +#include +#include +#include +#include + +#include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" -#include -#include -#include -#include - namespace paddle { namespace memory { namespace detail { diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc index 07123f2669c3a829ff28e9fab5a404047c5a09c7..f34b922b25a0110690671d487f190e1b977a67bb 100644 --- a/paddle/fluid/memory/detail/memory_block.cc +++ b/paddle/fluid/memory/detail/memory_block.cc @@ -13,143 +13,142 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_cache.h" -#include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/platform/assert.h" namespace paddle { namespace memory { namespace detail { -void MemoryBlock::init(MetadataCache& cache, Type t, size_t index, size_t size, +void MemoryBlock::init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy) { - cache.store(this, Metadata(t, index, size - sizeof(Metadata), size, - static_cast(left_buddy), - static_cast(right_buddy))); + cache->save( + this, MemoryBlock::Desc(t, index, size - sizeof(MemoryBlock::Desc), size, + static_cast(left_buddy), + static_cast(right_buddy))); } -MemoryBlock::Type MemoryBlock::type(MetadataCache& cache) const { +MemoryBlock::Type MemoryBlock::type(const MetadataCache& cache) const { return cache.load(this).type; } -size_t MemoryBlock::size(MetadataCache& cache) const { +size_t MemoryBlock::size(const MetadataCache& cache) const { return cache.load(this).size; } -size_t MemoryBlock::total_size(MetadataCache& cache) const { +size_t MemoryBlock::index(const MetadataCache& cache) const { + return cache.load(this).index; +} + +size_t MemoryBlock::total_size(const MetadataCache& cache) const { return cache.load(this).total_size; } -MemoryBlock* MemoryBlock::left_buddy(MetadataCache& cache) const { +bool MemoryBlock::has_left_buddy(const MetadataCache& cache) const { + return left_buddy(cache) != nullptr; +} + +bool MemoryBlock::has_right_buddy(const MetadataCache& cache) const { + return right_buddy(cache) != nullptr; +} + +MemoryBlock* MemoryBlock::left_buddy(const MetadataCache& cache) const { return cache.load(this).left_buddy; } -MemoryBlock* MemoryBlock::right_buddy(MetadataCache& cache) const { +MemoryBlock* MemoryBlock::right_buddy(const MetadataCache& cache) const { return cache.load(this).right_buddy; } -void MemoryBlock::split(MetadataCache& cache, size_t size) { +void MemoryBlock::split(MetadataCache* cache, size_t size) { // make sure the split fits - PADDLE_ASSERT(total_size(cache) >= size); + PADDLE_ASSERT(total_size(*cache) >= size); // bail out if there is no room for another partition - if (total_size(cache) - size <= sizeof(Metadata)) { + if (total_size(*cache) - size <= sizeof(MemoryBlock::Desc)) { return; } // find the position of the split void* right_partition = reinterpret_cast(this) + size; - size_t remaining_size = total_size(cache) - size; + size_t remaining_size = total_size(*cache) - size; // Add the new block as a buddy - auto metadata = cache.load(this); + auto metadata = cache->load(this); // Write the metadata for the new block auto new_block_right_buddy = metadata.right_buddy; - cache.store( - static_cast(right_partition), - Metadata(FREE_CHUNK, index(cache), remaining_size - sizeof(Metadata), - remaining_size, this, new_block_right_buddy)); + cache->save(static_cast(right_partition), + MemoryBlock::Desc(FREE_CHUNK, index(*cache), + remaining_size - sizeof(MemoryBlock::Desc), + remaining_size, this, new_block_right_buddy)); metadata.right_buddy = static_cast(right_partition); - metadata.size = size - sizeof(Metadata); + metadata.size = size - sizeof(MemoryBlock::Desc); metadata.total_size = size; - cache.store(this, metadata); + cache->save(this, metadata); // Write metadata for the new block's right buddy if (new_block_right_buddy != nullptr) { - auto buddy_metadata = cache.load(new_block_right_buddy); + auto buddy_metadata = cache->load(new_block_right_buddy); 
buddy_metadata.left_buddy = static_cast(right_partition); - cache.store(new_block_right_buddy, buddy_metadata); + cache->save(new_block_right_buddy, buddy_metadata); } } -void MemoryBlock::merge(MetadataCache& cache, MemoryBlock* right_buddy) { +void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) { // only free blocks can be merged - PADDLE_ASSERT(type(cache) == FREE_CHUNK); - PADDLE_ASSERT(right_buddy->type(cache) == FREE_CHUNK); + PADDLE_ASSERT(type(*cache) == FREE_CHUNK); + PADDLE_ASSERT(right_buddy->type(*cache) == FREE_CHUNK); - auto metadata = cache.load(this); + auto metadata = cache->load(this); // link this->buddy's buddy - metadata.right_buddy = right_buddy->right_buddy(cache); + metadata.right_buddy = right_buddy->right_buddy(*cache); // link buddy's buddy -> this if (metadata.right_buddy != nullptr) { - auto buddy_metadata = cache.load(metadata.right_buddy); + auto buddy_metadata = cache->load(metadata.right_buddy); buddy_metadata.left_buddy = this; - cache.store(metadata.right_buddy, buddy_metadata); + cache->save(metadata.right_buddy, buddy_metadata); } - metadata.size += right_buddy->total_size(cache); - metadata.total_size += right_buddy->total_size(cache); + metadata.size += right_buddy->total_size(*cache); + metadata.total_size += right_buddy->total_size(*cache); - cache.store(this, metadata); - cache.store(right_buddy, Metadata(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); + cache->save(this, metadata); + cache->save(right_buddy, + MemoryBlock::Desc(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr)); } -void MemoryBlock::mark_as_free(MetadataCache& cache) { +void MemoryBlock::mark_as_free(MetadataCache* cache) { // check for double free or corruption - PADDLE_ASSERT(type(cache) != FREE_CHUNK); - PADDLE_ASSERT(type(cache) != INVALID_CHUNK); - + PADDLE_ASSERT(type(*cache) != FREE_CHUNK); + PADDLE_ASSERT(type(*cache) != INVALID_CHUNK); set_type(cache, FREE_CHUNK); } -void MemoryBlock::set_type(MetadataCache& cache, Type t) { - auto metadata = cache.load(this); - +void MemoryBlock::set_type(MetadataCache* cache, Type t) { + auto metadata = cache->load(this); metadata.type = t; - - cache.store(this, metadata); -} - -bool MemoryBlock::has_left_buddy(MetadataCache& cache) const { - return left_buddy(cache) != nullptr; -} - -bool MemoryBlock::has_right_buddy(MetadataCache& cache) const { - return right_buddy(cache) != nullptr; -} - -size_t MemoryBlock::index(MetadataCache& cache) const { - return cache.load(this).index; + cache->save(this, metadata); } void* MemoryBlock::data() const { - return const_cast(reinterpret_cast(this)) + 1; + return const_cast( + reinterpret_cast(this)) + + 1; } MemoryBlock* MemoryBlock::metadata() const { return const_cast(reinterpret_cast( - reinterpret_cast(this) - 1)); + reinterpret_cast(this) - 1)); } } // namespace detail diff --git a/paddle/fluid/memory/detail/memory_block.h b/paddle/fluid/memory/detail/memory_block.h index 72b40b73177d086aa912416e7f9cb3cd4ad5b45e..5cceba659beeec1b3c986dc43229f6725e3e11de 100644 --- a/paddle/fluid/memory/detail/memory_block.h +++ b/paddle/fluid/memory/detail/memory_block.h @@ -11,21 +11,21 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once -#include +#include +#include namespace paddle { namespace memory { namespace detail { -// Forward Declarations +// Forward declaration. 
class MetadataCache; -/*! \brief A class used to interpret the contents of a memory block */ -class MemoryBlock { - public: +// MemoryBlock represents each allocated memory block, which contains +// MemoryBlock::Desc and the payload. +struct MemoryBlock { enum Type { FREE_CHUNK, // memory is free and idle ARENA_CHUNK, // memory is being occupied @@ -33,57 +33,96 @@ class MemoryBlock { INVALID_CHUNK // memory is invalid }; - public: - void init(MetadataCache& cache, Type t, size_t index, size_t size, + // init saves the MemoryBlock::Desc of the memory block in a MetadataCache. + // If it is a CPU memory block, the MetadataCache writes the + // MemoryBlock::Desc to the beginning of the block; if it is a GPU memory + // block, the MetadataCache writes the MemoryBlock::Desc to a std::map in + // the CPU. + void init(MetadataCache* cache, Type t, size_t index, size_t size, void* left_buddy, void* right_buddy); - public: - /*! \brief The type of the allocation */ - Type type(MetadataCache& cache) const; - - /*! \brief The size of the data region */ - size_t size(MetadataCache& cache) const; + // All these accessors return fields in the MemoryBlock::Desc of the memory + // block. They all need a MetadataCache instance as their first + // parameter because they read the MemoryBlock::Desc from the cache. + Type type(const MetadataCache& cache) const; + size_t size(const MetadataCache& cache) const; + size_t index(const MetadataCache& cache) const; + size_t total_size(const MetadataCache& cache) const; + bool has_left_buddy(const MetadataCache& cache) const; + bool has_right_buddy(const MetadataCache& cache) const; + MemoryBlock* left_buddy(const MetadataCache& cache) const; + MemoryBlock* right_buddy(const MetadataCache& cache) const; - /*! \brief An index to track the allocator */ - size_t index(MetadataCache& cache) const; + // Split the allocation into left/right blocks. + void split(MetadataCache* cache, size_t size); - /*! \brief The total size of the block */ - size_t total_size(MetadataCache& cache) const; + // Merge left and right blocks together. + void merge(MetadataCache* cache, MemoryBlock* right_buddy); - /*! \brief Check the left buddy of the block */ - bool has_left_buddy(MetadataCache& cache) const; + // Mark the allocation as free. + void mark_as_free(MetadataCache* cache); - /*! \brief Check the right buddy of the block */ - bool has_right_buddy(MetadataCache& cache) const; - - /*! \brief Get the left buddy */ - MemoryBlock* left_buddy(MetadataCache& cache) const; - - /*! \brief Get the right buddy */ - MemoryBlock* right_buddy(MetadataCache& cache) const; - - public: - /*! \brief Split the allocation into left/right blocks */ - void split(MetadataCache& cache, size_t size); + // Change the type of the allocation. + void set_type(MetadataCache* cache, Type t); - /*! \brief Merge left and right blocks together */ - void merge(MetadataCache& cache, MemoryBlock* right_buddy); - - /*! \brief Mark the allocation as free */ - void mark_as_free(MetadataCache& cache); - - /*! \brief Change the type of the allocation */ - void set_type(MetadataCache& cache, Type t); - - public: - /*! \brief Get a pointer to the memory block's data */ void* data() const; - - /*! \brief Get a pointer to the memory block's metadata */ MemoryBlock* metadata() const; + // MemoryBlock::Desc describes a MemoryBlock. + struct Desc { + Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, + MemoryBlock* r); + Desc(); + + // Updates guard_begin and guard_end by hashes of the MemoryBlock::Desc object.
+ void update_guards(); + + // Checks that guard_begin and guard_end are hashes of the MemoryBlock::Desc object. + bool check_guards() const; + + // TODO(gangliao): compress this + size_t guard_begin = 0; + MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; + size_t index = 0; + size_t size = 0; + size_t total_size = 0; + MemoryBlock* left_buddy = nullptr; + MemoryBlock* right_buddy = nullptr; + size_t guard_end = 0; + }; +}; + +// A cache for accessing memory block meta-data that may be expensive +// to access directly. This class exists to unify the +// MemoryBlock::Desc format between GPU and CPU allocations. It should +// be removed when the CPU can access all GPU allocations directly via +// UVM. +class MetadataCache { public: - static size_t overhead(); + explicit MetadataCache(bool uses_gpu); + + // Disable copying and assignment. + MetadataCache(const MetadataCache&) = delete; + MetadataCache& operator=(const MetadataCache&) = delete; + + // Returns the MemoryBlock::Desc for a memory block. When MetadataCache is + // used to manage CPU memory, the MemoryBlock::Desc resides at the beginning + // of the memory block; when used to manage GPU memory, the + // MemoryBlock::Desc resides in CPU memory indexed by cache_. + MemoryBlock::Desc load(const MemoryBlock* memory_block) const; + + // Saves the MemoryBlock::Desc of a memory block into the cache. For a CPU + // memory block, writes the MemoryBlock::Desc to the beginning of the memory + // block; whereas for GPU memory, writes it to cache_. + void save(MemoryBlock* memory_block, const MemoryBlock::Desc& meta_data); + + // For a GPU memory block, erases its MemoryBlock::Desc from cache_. + void invalidate(MemoryBlock* memory_block); + + private: + typedef std::unordered_map MetadataMap; + MetadataMap cache_; + bool uses_gpu_; }; } // namespace detail diff --git a/paddle/fluid/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/memory_block_desc.cc similarity index 54% rename from paddle/fluid/memory/detail/meta_data.cc rename to paddle/fluid/memory/detail/memory_block_desc.cc index ad862af1705835c495a30232aa2bba2d2a56ad89..393dd9209c0aa443cd17c29b2f9de6eafb48bac9 100644 --- a/paddle/fluid/memory/detail/meta_data.cc +++ b/paddle/fluid/memory/detail/memory_block_desc.cc @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ -#include "paddle/fluid/memory/detail/meta_data.h" - #include +#include "paddle/fluid/memory/detail/memory_block.h" + namespace paddle { namespace memory { namespace detail { -Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, - MemoryBlock* l, MemoryBlock* r) +MemoryBlock::Desc::Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts, + MemoryBlock* l, MemoryBlock* r) : type(t), index(i), size(s), @@ -29,7 +29,7 @@ Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, left_buddy(l), right_buddy(r) {} -Metadata::Metadata() +MemoryBlock::Desc::Desc() : type(MemoryBlock::INVALID_CHUNK), index(0), size(0), @@ -37,32 +37,36 @@ Metadata::Metadata() left_buddy(nullptr), right_buddy(nullptr) {} +namespace { + template -inline void hash_combine(std::size_t& seed, const T& v) { +inline void hash_combine(std::size_t* seed, const T& v) { std::hash hasher; - seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + (*seed) ^= hasher(v) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2); } -inline size_t hash(const Metadata* metadata, size_t initial_seed) { +inline size_t hash(const MemoryBlock::Desc& metadata, size_t initial_seed) { size_t seed = initial_seed; - hash_combine(seed, (size_t)metadata->type); - hash_combine(seed, metadata->index); - hash_combine(seed, metadata->size); - hash_combine(seed, metadata->total_size); - hash_combine(seed, metadata->left_buddy); - hash_combine(seed, metadata->right_buddy); + hash_combine(&seed, static_cast(metadata.type)); + hash_combine(&seed, metadata.index); + hash_combine(&seed, metadata.size); + hash_combine(&seed, metadata.total_size); + hash_combine(&seed, metadata.left_buddy); + hash_combine(&seed, metadata.right_buddy); return seed; } -void Metadata::update_guards() { - guard_begin = hash(this, 1); - guard_end = hash(this, 2); +} // namespace + +void MemoryBlock::Desc::update_guards() { + guard_begin = hash(*this, 1); + guard_end = hash(*this, 2); } -bool Metadata::check_guards() const { - return guard_begin == hash(this, 1) && guard_end == hash(this, 2); +bool MemoryBlock::Desc::check_guards() const { + return guard_begin == hash(*this, 1) && guard_end == hash(*this, 2); } } // namespace detail diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index 43249e842ad4d2419fed041e6c9056021e9663cd..b86e4f38c42a26e155f276f9b73cbed1d0d83f7d 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/memory/detail/meta_cache.h" #include "glog/logging.h" #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/platform/assert.h" @@ -23,29 +22,28 @@ namespace detail { MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {} -Metadata MetadataCache::load(const MemoryBlock* block) { +MemoryBlock::Desc MetadataCache::load(const MemoryBlock* block) const { if (uses_gpu_) { - auto existing_metadata = cache_.find(block); - PADDLE_ASSERT(existing_metadata->second.check_guards()); - return existing_metadata->second; + auto existing_desc = cache_.find(block); + PADDLE_ASSERT(existing_desc->second.check_guards()); + return existing_desc->second; } else { - auto* meta = reinterpret_cast(block); - VLOG(10) << "Load MetaData type=" << meta->type; - PADDLE_ASSERT(meta->check_guards()); - return *reinterpret_cast(block); + auto* desc = reinterpret_cast(block); + VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type; + PADDLE_ASSERT(desc->check_guards()); + return *reinterpret_cast(block); } } -void MetadataCache::store(MemoryBlock* block, - const Metadata& original_metadata) { - auto metadata = original_metadata; - - metadata.update_guards(); +void MetadataCache::save(MemoryBlock* block, + const MemoryBlock::Desc& original_desc) { + auto desc = original_desc; + desc.update_guards(); if (uses_gpu_) { - cache_[block] = metadata; + cache_[block] = desc; } else { - *reinterpret_cast(block) = metadata; + *reinterpret_cast(block) = desc; } } diff --git a/paddle/fluid/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h deleted file mode 100644 index 3283d756a6e7f7f1750442039797846bdad51125..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/detail/meta_cache.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -/** - * \brief A cache for accessing memory block meta-data that may be expensive - * to access directly. - * - * \note This class exists to unify the metadata format between GPU and CPU - * allocations. It should be removed when the CPU can access all GPU - * allocations directly via UVM. - */ -class MetadataCache { - public: - explicit MetadataCache(bool uses_gpu); - - public: - /*! \brief Load the associated metadata for the specified memory block. */ - Metadata load(const MemoryBlock* memory_block); - - /*! \brief Store the associated metadata for the specified memory block. */ - void store(MemoryBlock* memory_block, const Metadata& meta_data); - - /*! \brief Indicate that the specified metadata will no longer be used. 
*/ - void invalidate(MemoryBlock* memory_block); - - public: - MetadataCache(const MetadataCache&) = delete; - MetadataCache& operator=(const MetadataCache&) = delete; - - private: - bool uses_gpu_; - - private: - typedef std::unordered_map MetadataMap; - - private: - MetadataMap cache_; -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h deleted file mode 100644 index 14895ee8727e98186b1f1295321951e12753fef6..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/detail/meta_data.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/fluid/memory/detail/memory_block.h" - -#include - -namespace paddle { -namespace memory { -namespace detail { - -class Metadata { - public: - Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l, - MemoryBlock* r); - Metadata(); - - public: - /*! \brief Update the guards when metadata is changed */ - void update_guards(); - - /*! \brief Check consistency to previous modification */ - bool check_guards() const; - - public: - // TODO(gangliao): compress this - // clang-format off - size_t guard_begin = 0; - MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK; - size_t index = 0; - size_t size = 0; - size_t total_size = 0; - MemoryBlock* left_buddy = nullptr; - MemoryBlock* right_buddy = nullptr; - size_t guard_end = 0; - // clang-format on -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index a45f8c33ee5956f3409ee1b7c43628aa0acafb98..d5390529163491c2711e50ffad236534e88b73ee 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/memory/detail/system_allocator.h" -#include "paddle/fluid/platform/assert.h" -#include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include // for malloc and free #include // for mlock and munlock #include // for std::max #include "gflags/gflags.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" // If use_pinned_memory is true, CPUAllocator calls mlock, which // returns pinned and locked memory as staging areas for data exchange @@ -35,13 +35,13 @@ namespace paddle { namespace memory { namespace detail { -void* CPUAllocator::Alloc(size_t& index, size_t size) { +void* CPUAllocator::Alloc(size_t* index, size_t size) { // According to http://www.cplusplus.com/reference/cstdlib/malloc/, // malloc might not return nullptr if size is zero, but the returned // pointer shall not be dereferenced -- so we make it nullptr. if (size <= 0) return nullptr; - index = 0; // unlock memory + *index = 0; // unlock memory void* p; @@ -56,7 +56,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) { if (p != nullptr) { if (FLAGS_use_pinned_memory) { - index = 1; + *index = 1; mlock(p, size); // lock memory } } @@ -75,7 +75,7 @@ bool CPUAllocator::UseGpu() const { return false; } #ifdef PADDLE_WITH_CUDA -void* GPUAllocator::Alloc(size_t& index, size_t size) { +void* GPUAllocator::Alloc(size_t* index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr // if size is 0. We just make sure it does. if (size <= 0) return nullptr; @@ -93,7 +93,7 @@ void* GPUAllocator::Alloc(size_t& index, size_t size) { } if (result == cudaSuccess) { - index = 0; + *index = 0; gpu_alloc_size_ += size; return p; } else { @@ -133,7 +133,7 @@ bool GPUAllocator::UseGpu() const { return true; } // PINNED memory allows direct DMA transfers by the GPU to and from system // memory. It’s locked to a physical address. 
-void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { +void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) { if (size <= 0) return nullptr; // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size @@ -154,7 +154,7 @@ void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { cudaError_t result = cudaMallocHost(&p, size); if (result == cudaSuccess) { - index = 1; // PINNED memory + *index = 1; // PINNED memory cuda_pinnd_alloc_size_ += size; return p; } else { diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h index e3c50ef6483c61e2016bbd967a4100057c87dca3..a0386a2dad1bb7faf54197a47ca7a5b6d9488817 100644 --- a/paddle/fluid/memory/detail/system_allocator.h +++ b/paddle/fluid/memory/detail/system_allocator.h @@ -29,14 +29,14 @@ namespace detail { class SystemAllocator { public: virtual ~SystemAllocator() {} - virtual void* Alloc(size_t& index, size_t size) = 0; + virtual void* Alloc(size_t* index, size_t size) = 0; virtual void Free(void* p, size_t size, size_t index) = 0; virtual bool UseGpu() const = 0; }; class CPUAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; }; @@ -46,7 +46,7 @@ class GPUAllocator : public SystemAllocator { public: explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {} - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; @@ -58,7 +58,7 @@ class GPUAllocator : public SystemAllocator { class CUDAPinnedAllocator : public SystemAllocator { public: - virtual void* Alloc(size_t& index, size_t size); + virtual void* Alloc(size_t* index, size_t size); virtual void Free(void* p, size_t size, size_t index); virtual bool UseGpu() const; diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc index 3e1926f632c57b7906e4a76f43ff7a753d71d97f..268260142c579ea9301d89fcec1613ce5b0e15a5 100644 --- a/paddle/fluid/memory/detail/system_allocator_test.cc +++ b/paddle/fluid/memory/detail/system_allocator_test.cc @@ -22,11 +22,11 @@ limitations under the License. 
*/ DECLARE_bool(use_pinned_memory); -void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { +void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { bool freed = false; { size_t index; - void* p = a.Alloc(index, size); + void* p = a->Alloc(&index, size); if (size > 0) { EXPECT_NE(p, nullptr); } else { @@ -36,7 +36,7 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { int* i = static_cast(p); std::shared_ptr ptr(i, [&](void* p) { freed = true; - a.Free(p, size, index); + a->Free(p, size, index); }); } EXPECT_TRUE(freed); @@ -45,21 +45,21 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { TEST(CPUAllocator, NoLockMem) { FLAGS_use_pinned_memory = false; paddle::memory::detail::CPUAllocator a; - TestAllocator(a, 2048); - TestAllocator(a, 0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } TEST(CPUAllocator, LockMem) { FLAGS_use_pinned_memory = true; paddle::memory::detail::CPUAllocator a; - TestAllocator(a, 2048); - TestAllocator(a, 0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #ifdef PADDLE_WITH_CUDA TEST(GPUAllocator, Alloc) { paddle::memory::detail::GPUAllocator a(0); - TestAllocator(a, 2048); - TestAllocator(a, 0); + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #endif diff --git a/paddle/fluid/memory/memory.cc b/paddle/fluid/memory/malloc.cc similarity index 99% rename from paddle/fluid/memory/memory.cc rename to paddle/fluid/memory/malloc.cc index 2c13dbc6d51bfa3853cec5270e8115c899f522ea..0c74f62de5c6f5d432ee928945db6dcf385ca209 100644 --- a/paddle/fluid/memory/memory.cc +++ b/paddle/fluid/memory/malloc.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/memory/malloc.h" #include "glog/logging.h" diff --git a/paddle/fluid/memory/malloc.h b/paddle/fluid/memory/malloc.h new file mode 100644 index 0000000000000000000000000000000000000000..3e6bfddd69cb16edf323d040ea5369cd551f299e --- /dev/null +++ b/paddle/fluid/memory/malloc.h @@ -0,0 +1,104 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace memory { + +/** + * \brief Allocate memory block in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] size Allocation size. + * + * \return Allocated memory block address. + * + * \note If return nullptr, it indicates memory allocation failed + * because insufficient memory in current system. When Alloc + * function is invoked, you must check the returned memory + * address is valid or not. + */ +template +void* Alloc(Place place, size_t size); + +/** + * \brief Free memory block in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] ptr Memory block address to free. 
+ * + */ +template +void Free(Place place, void* ptr); + +/** + * \brief Total size of used memory in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * + */ +template +size_t Used(Place place); + +struct Usage : public boost::static_visitor { + size_t operator()(const platform::CPUPlace& cpu) const; + size_t operator()(const platform::CUDAPlace& gpu) const; + size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const; +}; + +size_t memory_usage(const platform::Place& p); + +/** + * \brief Free memory block in one place. + * + * \note In some cases, custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. + * + */ +template +class PODDeleter { + static_assert(std::is_pod::value, "T must be POD"); + + public: + explicit PODDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, static_cast(ptr)); } + + private: + Place place_; +}; + +/** + * \brief Free memory block in one place does not meet POD + * + * \note In some cases, custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. + * + */ +template +class PlainDeleter { + public: + explicit PlainDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } + + private: + Place place_; +}; + +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/malloc_test.cc similarity index 95% rename from paddle/fluid/memory/memory_test.cc rename to paddle/fluid/memory/malloc_test.cc index 9fbbe62559b1e29d6942a1ada62558b20830489b..d39466ef60c3750600dea726a6570397423d42f6 100644 --- a/paddle/fluid/memory/memory_test.cc +++ b/paddle/fluid/memory/malloc_test.cc @@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/memory/malloc.h" #include #include "gtest/gtest.h" #include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" @@ -28,7 +27,7 @@ inline bool is_aligned(void const *p) { } size_t align(size_t size, paddle::platform::CPUPlace place) { - size += sizeof(paddle::memory::detail::Metadata); + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); size_t alignment = paddle::platform::CpuMinChunkSize(); size_t remaining = size % alignment; return remaining == 0 ? size : size + (alignment - remaining); @@ -86,7 +85,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { #ifdef PADDLE_WITH_CUDA size_t align(size_t size, paddle::platform::CUDAPlace place) { - size += sizeof(paddle::memory::detail::Metadata); + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); size_t alignment = paddle::platform::GpuMinChunkSize(); size_t remaining = size % alignment; return remaining == 0 ? size : size + (alignment - remaining); @@ -142,7 +141,7 @@ TEST(BuddyAllocator, GPUMultAlloc) { } size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) { - size += sizeof(paddle::memory::detail::Metadata); + size += sizeof(paddle::memory::detail::MemoryBlock::Desc); size_t alignment = paddle::platform::CUDAPinnedMinChunkSize(); size_t remaining = size % alignment; return remaining == 0 ? 
size : size + (alignment - remaining); diff --git a/paddle/fluid/memory/memory.h b/paddle/fluid/memory/memory.h index 3e6bfddd69cb16edf323d040ea5369cd551f299e..8d904e3be56abf0974ba7379f7ca1b676fcb0409 100644 --- a/paddle/fluid/memory/memory.h +++ b/paddle/fluid/memory/memory.h @@ -14,91 +14,5 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/platform/place.h" - -namespace paddle { -namespace memory { - -/** - * \brief Allocate memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] size Allocation size. - * - * \return Allocated memory block address. - * - * \note If return nullptr, it indicates memory allocation failed - * because insufficient memory in current system. When Alloc - * function is invoked, you must check the returned memory - * address is valid or not. - */ -template -void* Alloc(Place place, size_t size); - -/** - * \brief Free memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] ptr Memory block address to free. - * - */ -template -void Free(Place place, void* ptr); - -/** - * \brief Total size of used memory in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * - */ -template -size_t Used(Place place); - -struct Usage : public boost::static_visitor { - size_t operator()(const platform::CPUPlace& cpu) const; - size_t operator()(const platform::CUDAPlace& gpu) const; - size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const; -}; - -size_t memory_usage(const platform::Place& p); - -/** - * \brief Free memory block in one place. - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PODDeleter { - static_assert(std::is_pod::value, "T must be POD"); - - public: - explicit PODDeleter(Place place) : place_(place) {} - void operator()(T* ptr) { Free(place_, static_cast(ptr)); } - - private: - Place place_; -}; - -/** - * \brief Free memory block in one place does not meet POD - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PlainDeleter { - public: - explicit PlainDeleter(Place place) : place_(place) {} - void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } - - private: - Place place_; -}; - -} // namespace memory -} // namespace paddle +#include "paddle/fluid/memory/malloc.h" +#include "paddle/fluid/memory/memcpy.h" diff --git a/paddle/fluid/memory/pinned_memory_test.cu b/paddle/fluid/memory/pinned_memory_test.cu index a000001f41788fb16ac075426f06357cbe42d642..0d898f59ee1b8c783c5357aa7e27581a993a6d30 100644 --- a/paddle/fluid/memory/pinned_memory_test.cu +++ b/paddle/fluid/memory/pinned_memory_test.cu @@ -15,7 +15,6 @@ limitations under the License. 
*/ #include #include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 84eabab563e3404ad2a28bf76116c592db04742e..5ff987ad8b3ba3c9195e87e6c11e70ac98fa0a11 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -263,7 +263,7 @@ cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op) -cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory) +cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory) cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc index 6ff363d766db7dd97e1bc193ef7b4a095a7b7c24..ab7c61227114fe7a0ce2ff2515dd560706058b64 100644 --- a/paddle/fluid/operators/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/activation_mkldnn_op.cc @@ -13,8 +13,8 @@ limitations under the License. */ #include "mkldnn.hpp" -#include "mkldnn_activation_op.h" #include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/operators/mkldnn_activation_op.h" namespace paddle { namespace operators { @@ -40,18 +40,24 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm, const T *dst_data = dst->template mutable_data(ctx.GetPlace()); // get memory dim - PADDLE_ENFORCE(src->dims().size() == 4, - "Input dim must be with 4, i.e. NCHW"); + PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4, + "Input dim must be with 2 or 4"); std::vector src_tz = framework::vectorize2int(src->dims()); // create memory description - // TODO(kbinias-intel): support more formats - auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); + auto data_md = src_tz.size() == 2 + ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nc) + : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nchw); // create memory primitives - auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src_data); - auto dst_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)dst_data); + auto src_memory = + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(src_data))); + auto dst_memory = + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(dst_data))); auto forward_desc = mkldnn::eltwise_forward::desc( mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta); @@ -91,15 +97,21 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm, std::vector src_tz = framework::vectorize2int(x->dims()); // create memory description - auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); + auto data_md = src_tz.size() == 2 + ? 
platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nc) + : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, + mkldnn::memory::format::nchw); // create memory primitives - auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src); + auto src_memory = mkldnn::memory( + {data_md, mkldnn_engine}, static_cast(const_cast(src))); auto diff_src_memory = - mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_src); + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(diff_src))); auto diff_dst_memory = - mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_dst); + mkldnn::memory({data_md, mkldnn_engine}, + static_cast(const_cast(diff_dst))); auto backward_desc = mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta); diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index 3adeeda90645ca983d9d9229b4cc1c4c90302206..719a7465b8d58ef8588ff1e83c2b971eb6fbb00f 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -5,5 +5,5 @@ if(WITH_DISTRIBUTE) set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr cares zlib protobuf sendrecvop_grpc) - cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf) + cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor proto_desc lookup_table_op) endif() diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index ef987d07f08525bff5267cdc2076ae767417e4f1..8bbfd1f15925992efdeaaffbbe7b350ffbcee889 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -138,7 +138,7 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep, auto* var = p_scope->FindVar(in_var_name_val); ::grpc::ByteBuffer req; - SerializeToByteBuffer(in_var_name_val, var, *p_ctx, &req); + SerializeToByteBuffer(in_var_name_val, var, *p_ctx, &req, out_var_name_val); // var handle VarHandle var_h; diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 2e7bf1921a26fc88d854e4db2c501548695a136a..d5fc163bc25409e0607b149b61c6266b38119d9d 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -138,39 +138,48 @@ class RequestPrefetch final : public RequestBase { framework::Scope* scope, const platform::DeviceContext* dev_ctx, framework::Executor* executor, - framework::ProgramDesc* program, int blkid) + framework::ProgramDesc* program, + framework::ExecutorPrepareContext* prefetch_ctx) : RequestBase(service, cq, dev_ctx), responder_(&ctx_), scope_(scope), executor_(executor), program_(program), - blkid_(blkid) { + prefetch_ctx_(prefetch_ctx) { + request_.reset(new VariableResponse(scope, dev_ctx_)); int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); - service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, - cq_, this); + service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, + cq_, cq_, this); } virtual ~RequestPrefetch() {} - virtual std::string GetReqName() { return request_.varname(); } + virtual std::string GetReqName() { return request_->Varname(); 
} virtual void Process() { // prefetch process... ::grpc::ByteBuffer reply; - // TODO(Yancey1989): execute the Block which containers prefetch ops - VLOG(3) << "RequestPrefetch Process in"; + std::string var_name = request_->OutVarname(); + auto var_desc = program_->Block(0).FindVar(var_name); + framework::Scope* local_scope = &scope_->NewScope(); + auto* var = local_scope->FindVar(var_name); + InitializeVariable(var, var_desc->GetType()); + executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false); + + SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); responder_.Finish(reply, ::grpc::Status::OK, this); status_ = FINISH; } protected: - sendrecv::VariableMessage request_; + std::shared_ptr request_; ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; framework::Scope* scope_; framework::Executor* executor_; framework::ProgramDesc* program_; + framework::ExecutorPrepareContext* prefetch_ctx_; int blkid_; }; @@ -268,7 +277,7 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { } RequestPrefetch* prefetch = new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, - executor_, program_, prefetch_blk_id_); + executor_, program_, prefetch_ctx_); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); } diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 380447f47c142bdc16e60f78c4b2d94235ec5060..b6110f92ed4f38a156e0c99ecfb399f3f47a169e 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -63,6 +63,10 @@ class AsyncGRPCServer final { void SetExecutor(framework::Executor *executor) { executor_ = executor; } + void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { + prefetch_ctx_ = prepared; + } + int GetSelectedPort() { return selected_port_; } const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } @@ -111,6 +115,7 @@ class AsyncGRPCServer final { std::unique_ptr t_prefetch_; int prefetch_blk_id_; + framework::ExecutorPrepareContext *prefetch_ctx_; framework::ProgramDesc *program_; framework::Executor *executor_; int selected_port_; diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index b89aed0157de8e95564015b3e7f42316a39537f5..c51933718f4ca78e87c77e007c485642000d247d 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -20,43 +20,121 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/detail/grpc_client.h" #include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + namespace framework = paddle::framework; namespace platform = paddle::platform; namespace detail = paddle::operators::detail; +USE_OP(lookup_table); + std::unique_ptr rpc_service_; +framework::BlockDesc* AppendPrefetchBlcok(framework::ProgramDesc* program) { + auto root_block = program->MutableBlock(0); + auto* block = program->AppendBlock(*root_block); + + framework::VariableNameMap input({{"W", {"w"}}, {"Ids", {"ids"}}}); + framework::VariableNameMap output({{"Output", {"out"}}}); + auto op = block->AppendOp(); + op->SetType("lookup_table"); + op->SetInput("W", {"w"}); + op->SetInput("Ids", {"ids"}); + op->SetOutput("Out", {"out"}); + + auto& out = *root_block->Var("out"); + out.SetType(framework::proto::VarType::SELECTED_ROWS); + out.SetShape({10, 10}); + + return block; +} + +void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) { + auto w_var = scope->Var("w"); + w_var->GetMutable(); + + auto out_var = scope->Var("out"); + out_var->GetMutable(); + + auto ids_var = scope->Var("ids"); + ids_var->GetMutable(); +} + +void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto ids_var = scope->Var("ids")->GetMutable(); + auto rows = ids_var->mutable_rows(); + for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i * 2); + ids_var->mutable_value()->Resize({rows_numel, 1}); + ids_var->mutable_value()->mutable_data(*place); +} + +void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto w = scope->Var("w")->GetMutable(); + auto rows = w->mutable_rows(); + for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i); + auto w_value = w->mutable_value(); + w_value->Resize({rows_numel, 10}); + + auto ptr = w_value->mutable_data(*place); + + for (int64_t i = 0; i < w_value->numel(); ++i) { + ptr[i] = static_cast(i / 10); + } +} + void StartServer(const std::string& endpoint) { rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + framework::ProgramDesc program; + framework::Scope scope; + platform::CPUPlace place; + framework::Executor exe(place); + platform::CPUDeviceContext ctx(place); + auto* block = AppendPrefetchBlcok(&program); + auto prepared = exe.Prepare(program, block->ID()); + InitTensorsOnServer(&scope, &place, 10); + + rpc_service_->SetProgram(&program); + rpc_service_->SetPrefetchPreparedCtx(prepared.get()); + rpc_service_->SetDevCtx(&ctx); + rpc_service_->SetScope(&scope); + rpc_service_->SetExecutor(&exe); + rpc_service_->RunSyncUpdate(); } TEST(PREFETCH, CPU) { // start up a server instance backend - // TODO(Yancey1989): Need to start a server with optimize blocks and - // prefetch blocks. 
std::thread server_thread(StartServer, "127.0.0.1:8889"); + sleep(2); framework::Scope scope; platform::CPUPlace place; platform::CPUDeviceContext ctx(place); // create var on local scope - std::string in_var_name("in"); + int64_t rows_numel = 5; + InitTensorsOnClient(&scope, &place, rows_numel); + std::string in_var_name("ids"); std::string out_var_name("out"); - auto* in_var = scope.Var(in_var_name); - auto* in_tensor = in_var->GetMutable(); - in_tensor->Resize({10, 10}); - VLOG(3) << "before mutable_data"; - in_tensor->mutable_data(place); - scope.Var(out_var_name); - - VLOG(3) << "before fetch"; detail::RPCClient client; client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name, out_var_name); client.Wait(); + auto var = scope.Var(out_var_name); + auto value = var->GetMutable()->value(); + auto ptr = value.mutable_data(place); + rpc_service_->ShutDown(); server_thread.join(); rpc_service_.reset(nullptr); + + for (int64_t i = 0; i < rows_numel; ++i) { + EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast(i * 2)); + } } diff --git a/paddle/fluid/operators/detail/send_recv.proto b/paddle/fluid/operators/detail/send_recv.proto index fc12e82a7e6bd10262092d1ca367980df64e91c2..02bb2b9cebb87b83aa1cbef0c644f969b4d17284 100644 --- a/paddle/fluid/operators/detail/send_recv.proto +++ b/paddle/fluid/operators/detail/send_recv.proto @@ -21,7 +21,7 @@ service SendRecvService { rpc SendVariable(VariableMessage) returns (VoidMessage) {} // Argument VariableMessage for GetVariable should only contain varname. rpc GetVariable(VariableMessage) returns (VariableMessage) {} - // Prefetch variable by Ids + // pre-fetch variable by given variable name and Ids rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {} } @@ -67,6 +67,8 @@ message VariableMessage { bytes serialized = 8; // selected_rows data bytes rows = 9; + // Look up table block execution output variable name. 
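  // Filled by SerializeToByteBuffer() when a non-empty out_name is given, so
  // the server side of a prefetch request knows which variable to look up and
  // send back.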
+ string out_varname = 10; } message VoidMessage {} diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index f8576d01b10f4c0fda4d12d371b2966739acfc21..16c612c45a37dd2ffd17f8d5f5946df30e9b3fe6 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -30,11 +30,9 @@ namespace detail { void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg) { + ::grpc::ByteBuffer* msg, + const std::string& out_name) { using VarMsg = sendrecv::VariableMessage; - sendrecv::VariableMessage request; - std::string header; - request.AppendToString(&header); // When using GPU, need to free the copied CPU buffer // when the ByteBuffer destroies // TODO(typhoonzero): add unref here, if we have dependent @@ -52,6 +50,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, e.WriteUint64(VarMsg::kTypeFieldNumber, 1); } + if (!out_name.empty()) { + e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name); + } switch (framework::ToVarType(var->Type())) { case framework::proto::VarType_Type_LOD_TENSOR: { auto tensor = var->Get(); diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h index d7954440846b8db9a9add0110fb9a546a762774d..c72e1bd076f670458f3915072154847db6205092 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.h +++ b/paddle/fluid/operators/detail/sendrecvop_utils.h @@ -46,7 +46,8 @@ typedef void (*DestroyCallback)(void*); void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg); + ::grpc::ByteBuffer* msg, + const std::string& out_varname = std::string()); void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc index 78e1d274a92241b5f2093beb63acdc8c497dfb83..c9d7fd6d1581f6f4182e9e3e0d633c13a3c336a5 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -416,6 +416,20 @@ int VariableResponse::Parse(Source* source) { } break; } + case sendrecv::VariableMessage::kOutVarnameFieldNumber: { + uint32_t length; + if ((wt != WIRETYPE_LENGTH_DELIMITED) || !input.ReadVarint32(&length)) { + return tag; + } + + std::string temp; + if (!input.ReadString(&temp, length)) { + return tag; + } + + meta_.set_out_varname(temp); + break; + } default: { // Unknown tag, return unknown error. diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 050b6b84010b4f3e95bc88e5bb738ff18b7fe423..93b0d3cfb4f7d7f336414361773f872d7b259482 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -55,6 +55,7 @@ class VariableResponse { int Parse(const ::grpc::ByteBuffer& byte_buffer); inline std::string Varname() { return meta_.varname(); } + inline std::string OutVarname() { return meta_.out_varname(); } // should call parse first. 
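  // Parse() fills meta_; Varname(), OutVarname() and GetVar() all read from
  // it, and GetVar() uses the parsed name to look the variable up in scope_.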
framework::Variable* GetVar() { return scope_->FindVar(meta_.varname()); } diff --git a/paddle/fluid/operators/go_op.cc b/paddle/fluid/operators/go_op.cc index cfa797717d78aa72e1b931b6db6e153270b3424e..58fe32446217e07235b40b9b78190094e57e4951 100644 --- a/paddle/fluid/operators/go_op.cc +++ b/paddle/fluid/operators/go_op.cc @@ -56,11 +56,11 @@ class GoOp : public framework::OperatorBase { // TODO(varunarora): Consider moving this root scope lookup to scope.h. const framework::Scope *root_scope = &scope; - const framework::Scope *parent_scope = &(root_scope->parent()); + const framework::Scope *parent_scope = root_scope->parent(); while (parent_scope != nullptr) { root_scope = parent_scope; - parent_scope = &(parent_scope->parent()); + parent_scope = parent_scope->parent(); } framework::BlockDesc *block = Attr(kBlock); diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index 99f01c2a255ade81421c2bba95ff3d38ced6f87c..bd19d8908e35e51872d324ea5aa6bb02110d5a92 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -35,7 +37,7 @@ class LoDResetKernel : public framework::OpKernel { if (lod_t->lod().size() > 0) { auto y_lod = lod_t->lod(); auto last_level = y_lod[y_lod.size() - 1]; - PADDLE_ENFORCE_EQ(last_level.back(), in->dims()[0], + PADDLE_ENFORCE_EQ((int64_t)(last_level.back()), in->dims()[0], "Last value of `Y`'s last level LoD should be equal " "to the first dimension of `X`"); out->set_lod(y_lod); diff --git a/paddle/fluid/operators/reader/create_batch_reader_op.cc b/paddle/fluid/operators/reader/create_batch_reader_op.cc index 277f2856c07b3fec2113486539aec1d9139fae92..04c5872bef4600e30ba572a025cc5f0a5e9839ca 100644 --- a/paddle/fluid/operators/reader/create_batch_reader_op.cc +++ b/paddle/fluid/operators/reader/create_batch_reader_op.cc @@ -39,10 +39,13 @@ class CreateBatchReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) - ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); out->Reset( new BatchReader(underlying_reader.Get(), Attr("batch_size"))); } diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 96c0c1cbe6d588364416925a7ab1bc8f90ac6fd7..ed868786ab2a80efa42574ed4f579c633ce0becf 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -99,10 +99,13 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) - ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); auto place_str = Attr("place"); platform::Place place; diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc 
b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index 47d9989bc8748840ec2d39587fde24355d90b6b4..b72ccc77a3e1ec30fd817471d3ffd667974ae684 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -62,12 +62,15 @@ class CreateMultiPassReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable<framework::ReaderHolder>(); + if (out->Get() != nullptr) { + return; + } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get<framework::ReaderHolder>(); - auto& out = detail::Ref(scope.FindVar(Output("Out"))); int pass_num = Attr<int>("pass_num"); - out.GetMutable<framework::ReaderHolder>()->Reset( - new MultiPassReader(underlying_reader.Get(), pass_num)); + out->Reset(new MultiPassReader(underlying_reader.Get(), pass_num)); } }; diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 3a1f3805a0483c2f5eabdc7432556051d8308964..b164ce232d6bea7b4ff0c67ee0a7dd83b14f61a2 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -80,10 +80,14 @@ class CreateShuffleReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable<framework::ReaderHolder>(); + if (out->Get() != nullptr) { + return; + } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get<framework::ReaderHolder>(); - auto& var = detail::Ref(scope.FindVar(Output("Out"))); - var.GetMutable<framework::ReaderHolder>()->Reset( + out->Reset( new ShuffleReader(underlying_reader.Get(), static_cast<size_t>(Attr<int>("buffer_size")))); } diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 6780b8cc6deca64e9eaefa0b40d309449e730c8c..917bdc64abf608b8ade70c47f76a8adffb32046a 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -42,12 +42,12 @@ ENDIF() # memcpy depends on device_context, here add deps individually for # avoiding cycle dependencies -cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator - system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) +cc_library(device_context SRCS device_context.cc DEPS malloc + place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) -nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) +nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context) cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS}) cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index b716ad9df41330bd6e22937381d24e33fa3a7914..d60aecf96c8828a5656f81fd3602cfb2e66990cf 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -8,13 +8,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/platform/float16.h" + +#include + +#include "gtest/gtest.h" #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" -#include - namespace paddle { namespace platform { @@ -74,24 +75,27 @@ TEST(float16, conversion_cpu) { // Conversion operator EXPECT_EQ(Eigen::half(float16(1.0f)).x, 0x3c00); - EXPECT_EQ(float(float16(0.5f)), 0.5f); - EXPECT_NEAR(double(float16(0.33333)), 0.33333, 0.0001); - EXPECT_EQ(int(float16(-1)), -1); - EXPECT_EQ(bool(float16(true)), true); + EXPECT_EQ(static_cast<float>(float16(0.5f)), 0.5f); + EXPECT_NEAR(static_cast<double>(float16(0.33333)), 0.33333, 0.0001); + EXPECT_EQ(static_cast<int>(float16(-1)), -1); + EXPECT_EQ(static_cast<bool>(float16(true)), true); } TEST(float16, arithmetic_cpu) { - EXPECT_EQ(float(float16(1) + float16(1)), 2); - EXPECT_EQ(float(float16(5) + float16(-5)), 0); - EXPECT_NEAR(float(float16(0.33333f) + float16(0.66667f)), 1.0f, 0.001); - EXPECT_EQ(float(float16(3) - float16(5)), -2); - EXPECT_NEAR(float(float16(0.66667f) - float16(0.33333f)), 0.33334f, 0.001); - EXPECT_NEAR(float(float16(3.3f) * float16(2.0f)), 6.6f, 0.01); - EXPECT_NEAR(float(float16(-2.1f) * float16(-3.0f)), 6.3f, 0.01); - EXPECT_NEAR(float(float16(2.0f) / float16(3.0f)), 0.66667f, 0.001); - EXPECT_EQ(float(float16(1.0f) / float16(2.0f)), 0.5f); - EXPECT_EQ(float(-float16(512.0f)), -512.0f); - EXPECT_EQ(float(-float16(-512.0f)), 512.0f); + EXPECT_EQ(static_cast<float>(float16(1) + float16(1)), 2); + EXPECT_EQ(static_cast<float>(float16(5) + float16(-5)), 0); + EXPECT_NEAR(static_cast<float>(float16(0.33333f) + float16(0.66667f)), 1.0f, + 0.001); + EXPECT_EQ(static_cast<float>(float16(3) - float16(5)), -2); + EXPECT_NEAR(static_cast<float>(float16(0.66667f) - float16(0.33333f)), + 0.33334f, 0.001); + EXPECT_NEAR(static_cast<float>(float16(3.3f) * float16(2.0f)), 6.6f, 0.01); + EXPECT_NEAR(static_cast<float>(float16(-2.1f) * float16(-3.0f)), 6.3f, 0.01); + EXPECT_NEAR(static_cast<float>(float16(2.0f) / float16(3.0f)), 0.66667f, + 0.001); + EXPECT_EQ(static_cast<float>(float16(1.0f) / float16(2.0f)), 0.5f); + EXPECT_EQ(static_cast<float>(-float16(512.0f)), -512.0f); + EXPECT_EQ(static_cast<float>(-float16(-512.0f)), 512.0f); } TEST(float16, comparison_cpu) { diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index 567209df4edc483bcb5c6264c62034ddff50c413..577fc24ceb1d3c83cc0546dc5db9c8c7c1f01f86 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -36,19 +36,19 @@ limitations under the License.
*/ half *in1, *in2, *out; \ half *d_in1, *d_in2, *d_out; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - cudaMalloc((void**)&d_out, size); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ - out = (half*)malloc(size); \ + cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \ + cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \ + cudaMalloc(reinterpret_cast<void**>(&d_out), size); \ + in1 = reinterpret_cast<half*>(malloc(size)); \ + in2 = reinterpret_cast<half*>(malloc(size)); \ + out = reinterpret_cast<half*>(malloc(size)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \ - EXPECT_EQ(float(float16(out[0])), v_out); \ + EXPECT_EQ(static_cast<float>(float16(out[0])), v_out); \ free(in1); \ free(in2); \ free(out); \ @@ -63,17 +63,17 @@ limitations under the License. */ half *in1, *in2; \ half *d_in1, *d_in2; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ + cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \ + cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \ + in1 = reinterpret_cast<half*>(malloc(size)); \ + in2 = reinterpret_cast<half*>(malloc(size)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ op_type<<<1, 1>>>(d_in1, d_in2); \ cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \ - EXPECT_EQ(float(float16(in1[0])), v_out); \ + EXPECT_EQ(static_cast<float>(float16(in1[0])), v_out); \ free(in1); \ free(in2); \ cudaFree(d_in1); \ @@ -87,12 +87,12 @@ limitations under the License.
*/ half *d_in1, *d_in2; \ bool *out, *d_out; \ int size = sizeof(half); \ - cudaMalloc((void**)&d_in1, size); \ - cudaMalloc((void**)&d_in2, size); \ - cudaMalloc((void**)&d_out, 1); \ - in1 = (half*)malloc(size); \ - in2 = (half*)malloc(size); \ - out = (bool*)malloc(1); \ + cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \ + cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \ + cudaMalloc(reinterpret_cast<void**>(&d_out), 1); \ + in1 = reinterpret_cast<half*>(malloc(size)); \ + in2 = reinterpret_cast<half*>(malloc(size)); \ + out = reinterpret_cast<bool*>(malloc(1)); \ in1[0] = half(float16(v_in1)); \ in2[0] = half(float16(v_in2)); \ cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ @@ -130,13 +130,13 @@ void TestNeg(float v_in, float v_out) { LOG(INFO) << "Test Neg on GPU!"; half *in, *d_in; int size = sizeof(half); - cudaMalloc((void**)&d_in, size); - in = (half*)malloc(size); + cudaMalloc(reinterpret_cast<void**>(&d_in), size); + in = reinterpret_cast<half*>(malloc(size)); in[0] = half(float16(v_in)); cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); Neg<<<1, 1>>>(d_in); cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost); - EXPECT_EQ(float(float16(in[0])), v_out); + EXPECT_EQ(static_cast<float>(float16(in[0])), v_out); free(in); cudaFree(d_in); } diff --git a/paddle/fluid/pybind/.gitignore b/paddle/fluid/pybind/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8f222791edb016df65be5db75831f5f83cf63726 --- /dev/null +++ b/paddle/fluid/pybind/.gitignore @@ -0,0 +1 @@ +pybind.h diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 787925d9f8800b49de5b8b642304605ef4087d1e..884289a7fda65f9713392ec459219b4c89271e73 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -2,13 +2,13 @@ if(WITH_PYTHON) if(WITH_AMD_GPU) hip_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) else() cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 748ad75a99ea4955730327a10ae8468a107fed0a..bd8446df6650f5fb1c62e5370fd48216dbf31e17 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -544,13 +544,20 @@ All parameter, weight, gradient are variables in Paddle.
[](ParallelExecutor &self, size_t num_threads, bool use_event, const std::vector &places, const std::unordered_set ¶ms, - const ProgramDesc &startup_program, + const std::unordered_set &bcast_vars, const ProgramDesc &main_program, const std::string &loss_var_name, - Scope *scope, bool allow_op_delay) { - new (&self) ParallelExecutor(num_threads, use_event, places, - params, startup_program, main_program, - loss_var_name, scope, allow_op_delay); + Scope *scope, std::vector &local_scopes, + bool allow_op_delay) { + new (&self) + ParallelExecutor(num_threads, use_event, places, params, + bcast_vars, main_program, loss_var_name, + scope, local_scopes, allow_op_delay); }) + .def("local_scopes", + [](ParallelExecutor &self) -> std::vector * { + return &self.GetLocalScopes(); + }, + py::return_value_policy::reference) .def("run", &ParallelExecutor::Run); BindRecordIOWriter(&m); diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 77f84cd43bdf35ae6f54b0db2b5f720d24872878..a1f446817e0cbc1b4391398a82b0846d01bbec2c 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -6,6 +6,6 @@ if(WITH_TESTING) add_library(paddle_test_util STATIC TestUtil.cpp) add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) if(NOT MOBILE_INFERENCE) - cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init paddle_memory gtest gflags) + cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init memory gtest gflags) endif() endif() diff --git a/python/.gitignore b/python/.gitignore index 1ba1d4c9b0301ed920f5303089e75dd3a8e4e3fa..53a2b7a76b0dd2d9095f9582540e455e2c1174e2 100644 --- a/python/.gitignore +++ b/python/.gitignore @@ -1,6 +1,7 @@ *pyc build dist +paddlepaddle.egg-info paddle.egg-info paddlepaddle_gpu.egg-info .idea diff --git a/python/paddle/.gitignore b/python/paddle/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..98527864664d32f798edc06a53131e8d5a068295 --- /dev/null +++ b/python/paddle/.gitignore @@ -0,0 +1 @@ +version.py diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debuger.py index 7b4afa9bf65e1369329cd4648c1f5c4bd8fa8357..1c56064a1e8bdc5d975837cb5a75a40d557765ad 100644 --- a/python/paddle/fluid/debuger.py +++ b/python/paddle/fluid/debuger.py @@ -16,6 +16,7 @@ import sys import re from graphviz import GraphPreviewGenerator import proto.framework_pb2 as framework_pb2 +from google.protobuf import text_format _vartype2str_ = [ "UNK", @@ -100,7 +101,7 @@ def repr_var(vardesc): def pprint_program_codes(program_desc): reprs = [] - for block_idx in range(program_desc.num_blocks()): + for block_idx in range(program_desc.desc.num_blocks()): block_desc = program_desc.block(block_idx) block_repr = pprint_block_codes(block_desc) reprs.append(block_repr) @@ -127,7 +128,7 @@ def pprint_block_codes(block_desc, show_backward=False): if type(block_desc) is not framework_pb2.BlockDesc: block_desc = framework_pb2.BlockDesc.FromString( - block_desc.serialize_to_string()) + block_desc.desc.serialize_to_string()) var_reprs = [] op_reprs = [] for var in block_desc.vars: @@ -237,13 +238,13 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): # draw parameters and args vars = {} for var in desc.vars: - shape = [str(i) for i in var.lod_tensor.tensor.dims] - if not shape: - shape = ['null'] + # TODO(gongwb): format the var.type # create var if var.persistable: varn = graph.add_param( - var.name, var.type, shape, highlight=need_highlight(var.name)) + var.name, + 
str(var.type).replace("\n", "<br/>
", 1), + highlight=need_highlight(var.name)) else: varn = graph.add_arg(var.name, highlight=need_highlight(var.name)) vars[var.name] = varn @@ -268,4 +269,4 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): for var in op.outputs: add_op_link_var(opn, var, True) - graph(path, show=True) + graph(path, show=False) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 401e26f47420eee1c65545eba3e1ffb7f81b303e..33cf6918178ff746a6b130af0e23a69de0f532fe 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -640,12 +640,26 @@ class Operator(object): """ return self.desc.block_attr(name) + def all_attrs(self): + """ + Get the attribute dict + Returns(dict): The Operator's attribute dict + """ + attr_names = self.attr_names + attr_map = {} + for n in attr_names: + if n == 'sub_block': + attr_map[n] = self.block_attr(n) + else: + attr_map[n] = self.attr(n) + return attr_map + class Block(object): def __init__(self, program, idx): self.desc = program.desc.block(idx) self.vars = dict() # var_name --> var - self.ops = collections.deque() # operator list + self.ops = list() # operator list self.program = program self.removed_vars = dict() @@ -817,6 +831,13 @@ class Block(object): self.ops.append(op) return op + def insert_op(self, index, *args, **kwargs): + self.sync_with_cpp() + op_desc = self.desc.insert_op(index) + op = Operator(block=self, desc=op_desc, *args, **kwargs) + self.ops.insert(index, op) + return op + def delete_ops(self, ops): # remove from cpp # FIXME(typhoonzero): remove only the first occurrence. @@ -828,12 +849,12 @@ class Block(object): self.desc.remove_op(start, end + 1) def slice_ops(self, start, end): - return list(self.ops)[start:end] + return self.ops[start:end] def prepend_op(self, *args, **kwargs): op_desc = self.desc.prepend_op() op = Operator(self, op_desc, *args, **kwargs) - self.ops.appendleft(op) + self.ops.insert(0, op) return op def sync_with_cpp(self): @@ -878,7 +899,7 @@ class Block(object): for index in range((start_index - 1 - 1), -1, -1): op_desc = ops_in_cpp[index] op = Operator(self, op_desc) - self.ops.appendleft(op) + self.ops.insert(0, op) # sync ops append to the end of cpp_ops for index in range((end_index + 1), len(ops_in_cpp)): @@ -951,6 +972,13 @@ class Block(object): if var.type == core.VarDesc.VarType.STEP_SCOPES: ret_var = self.create_var( name=var.name, persistable=var.persistable, type=var.type) + elif var.type == core.VarDesc.VarType.SELECTED_ROWS: + ret_var = self.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + persistable=True) else: ret_var = self.create_var( name=var.name, diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index b8d21344fc8f65f4025f28a195dab2d371b30292..125b4efa9d476e561bd78d0365cd92bbf7e66605 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -83,7 +83,7 @@ class Graph(object): file = open(dot_path, 'w') file.write(self.__str__()) image_path = os.path.join( - os.path.dirname(__file__), dot_path[:-3] + "pdf") + os.path.dirname(dot_path), dot_path[:-3] + "pdf") cmd = ["dot", "-Tpdf", dot_path, "-o", image_path] subprocess.Popen( cmd, @@ -199,7 +199,7 @@ class GraphPreviewGenerator(object): else: self.graph.show(path) - def add_param(self, name, data_type, shape, highlight=False): + def add_param(self, name, data_type, highlight=False): label = '\n'.join([ '<', ' ', @@ -214,11 +214,6 @@ class GraphPreviewGenerator(object): str(data_type), ' ' ' 
', - ' ', - ' ' - ' ', '
', - '[%s]' % 'x'.join(shape), - '
>', ]) return self.graph.node( diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index bd7e9c30fed2c38a206bf17a646d8a4433af4099..969398bda4cfd0b2f5e39f45d34a1da9b216901f 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -255,7 +255,32 @@ def _copy_reader_var_(block, var): new_var.desc.set_shapes(var.desc.shapes()) new_var.desc.set_dtypes(var.desc.dtypes()) new_var.persistable = True - return monkey_patch_reader_methods(new_var) + return new_var + + +def _copy_reader_create_op_(block, op): + input_param_names = op.input_names + new_input_map = {} + for param_name in input_param_names: + new_input_map[param_name] = [] + arg_names = op.input(param_name) + for arg_name in arg_names: + new_input_map[param_name].append(block.var(arg_name)) + + output_param_names = op.output_names + new_output_map = {} + for param_name in output_param_names: + new_output_map[param_name] = [] + arg_names = op.output(param_name) + for arg_name in arg_names: + new_output_map[param_name].append(block.var(arg_name)) + + new_op = block.append_op( + type=op.type, + inputs=new_input_map, + outputs=new_output_map, + attrs=op.all_attrs()) + return new_op def open_recordio_file(filename, shapes, lod_levels, dtypes): @@ -283,8 +308,9 @@ def open_recordio_file(filename, shapes, lod_levels, dtypes): startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) + return monkey_patch_reader_methods(main_prog_var) def open_files(filenames, thread_num, shapes, lod_levels, dtypes): @@ -313,22 +339,25 @@ def open_files(filenames, thread_num, shapes, lod_levels, dtypes): startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) + return monkey_patch_reader_methods(main_prog_var) def __create_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) - startup_blk.append_op( + startop_op = startup_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [startup_var]}, attrs=attrs) startup_var.persistable = True - return _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_block = default_main_program().current_block() + main_prog_var = _copy_reader_var_(main_prog_block, startup_var) + _copy_reader_create_op_(main_prog_block, startop_op) + return monkey_patch_reader_methods(main_prog_var) def create_shuffle_reader(reader, buffer_size): diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 1b3ba414ecb50cc4d75dcaecd1f31265334c9aec..b93f2f974ca28cfd8d03c0dbbf1d401620a15e53 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -22,10 +22,49 @@ __all__ = ['ParallelExecutor'] class ParallelExecutor(object): def __init__(self, - loss_name, use_cuda, + loss_name=None, + main_program=None, num_threads=None, - allow_op_delay=False): + allow_op_delay=False, + share_vars_from=None): + """ + ParallelExecutor can run program in parallel. + + Args: + use_cuda(bool): Whether to use CUDA or not. 
+ loss_name(str, default None): The loss name must set in training. + main_program(Program, default None): The program that need to run, + if not provided, then default_main_program will be used. + num_threads(int, default None): How many threads are used for + training. + allow_op_delay(bool, default False): Whether to delay and buffer + some operators together for scheduling or not, which may + improve performance in some cases, defalut False. + share_vars_from(ParallelExecutor, default None): If provied, + it will share variables from the specified ParallelExecutor. + + Returns: + A ParallelExecutor object. + + Raises: + TypeError: If share_vars_from is provided, but not ParallelExecutor + object. + + Examples: + .. code-block:: python + + train_exe = fluid.ParallelExecutor( + use_cuda=True, loss_name=loss.name) + test_exe = fluid.ParallelExecutor( + use_cuda=True, + main_program=test_program, + share_vars_from=train_exe) + + train_loss, = train_exe.run([loss.name], feed_dict=feed_dict) + test_loss, = test_exe.run([loss.name], feed_dict=feed_dict) + """ + self._places = [] self._act_places = [] if use_cuda: @@ -50,10 +89,21 @@ class ParallelExecutor(object): else: min(len(self._places) * 2, multiprocessing.cpu_count()) - startup = framework.default_startup_program() - main = framework.default_main_program() + main = main_program + main = main if main else framework.default_main_program() scope = executor.global_scope() + if share_vars_from and not isinstance(share_vars_from, + ParallelExecutor): + raise TypeError("share_vars_from must be ParallelExecutor.") + local_scopes = share_vars_from.executor.local_scopes( + ) if share_vars_from else [] + + persistable_vars = [ + v.name + for v in filter(lambda var: var.persistable, main.list_vars()) + ] + self.executor = core.ParallelExecutor( num_threads, True if use_cuda else False, # use_event @@ -62,10 +112,11 @@ class ParallelExecutor(object): p.name for p in main.global_block().iter_parameters() if not p.stop_gradient ]), - startup.desc, + set(persistable_vars), main.desc, - loss_name, + loss_name if loss_name else '', scope, + local_scopes, allow_op_delay) self.scope = scope diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index fb162f8b7315936824ad40aca0c99e4dd09f9734..c5b53902bca90ae2260a7cda43e6866f897233b3 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -535,9 +535,37 @@ class TestSwish(OpTest): #--------------------test MKLDNN-------------------- -class TestMKLDNNRelu(TestRelu): +class TestMKLDNNReluDim2(TestRelu): def setUp(self): - super(TestMKLDNNRelu, self).setUp() + super(TestMKLDNNReluDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNTanhDim2(TestTanh): + def setUp(self): + super(TestMKLDNNTanhDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNSqrtDim2(TestSqrt): + def setUp(self): + super(TestMKLDNNSqrtDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNAbsDim2(TestAbs): + def setUp(self): + super(TestMKLDNNAbsDim2, self).setUp() + + self.attrs = {"use_mkldnn": True} + + +class TestMKLDNNReluDim4(TestRelu): + def setUp(self): + super(TestMKLDNNReluDim4, self).setUp() x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") # The same reason with TestAbs @@ -549,9 +577,9 @@ class TestMKLDNNRelu(TestRelu): self.attrs = {"use_mkldnn": True} -class TestMKLDNNTanh(TestTanh): 
+class TestMKLDNNTanhDim4(TestTanh): def setUp(self): - super(TestMKLDNNTanh, self).setUp() + super(TestMKLDNNTanhDim4, self).setUp() self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") @@ -560,9 +588,9 @@ class TestMKLDNNTanh(TestTanh): self.attrs = {"use_mkldnn": True} -class TestMKLDNNSqrt(TestSqrt): +class TestMKLDNNSqrtDim4(TestSqrt): def setUp(self): - super(TestMKLDNNSqrt, self).setUp() + super(TestMKLDNNSqrtDim4, self).setUp() self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") @@ -571,9 +599,9 @@ class TestMKLDNNSqrt(TestSqrt): self.attrs = {"use_mkldnn": True} -class TestMKLDNNAbs(TestAbs): +class TestMKLDNNAbsDim4(TestAbs): def setUp(self): - super(TestMKLDNNAbs, self).setUp() + super(TestMKLDNNAbsDim4, self).setUp() x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") # The same reason with TestAbs diff --git a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py index 2b7bbf9218f9b8fd8f5b29ac3cbc2f9680f471eb..67b03f635b6f8a3003efabe5425325080d47f61c 100644 --- a/python/paddle/fluid/tests/unittests/test_debugger.py +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -51,7 +51,9 @@ class TestDebugger(unittest.TestCase): outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) - print(debuger.pprint_program_codes(p.desc)) + print(debuger.pprint_program_codes(p)) + + debuger.draw_block_graphviz(p.block(0), path="./test.dot") if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py index 0f90e0e4df5da93f427b892d1be69f14625d2e29..8401716db88ef3dda68644a052d78b4476c9fdc7 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py @@ -207,7 +207,11 @@ class TestParallelExecutorBase(unittest.TestCase): if memory_opt: fluid.memory_optimize(main) - exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True) + place = fluid.CUDAPlace(0) + startup_exe = fluid.Executor(place) + startup_exe.run(startup) + + exe = fluid.ParallelExecutor(True, loss_name=loss.name) if batch_size is not None: batch_size *= fluid.core.get_cuda_device_count() begin = time.time() @@ -453,3 +457,41 @@ class TestTransformer(TestParallelExecutorBase): @unittest.skip("transformer is buggy in multi gpu") def test_main(self): self.check_network_convergence(transformer) + + +class ParallelExecutorTestingDuringTraining(unittest.TestCase): + def test_parallel_testing(self): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + loss = simple_fc_net(True) + test_program = main.clone(for_test=True) + + opt = fluid.optimizer.SGD(learning_rate=0.0001) + opt.minimize(loss) + + batch_size = 32 + image = numpy.random.normal(size=(batch_size, + 784)).astype('float32') + label = numpy.random.randint(0, 10, (batch_size, 1), dtype="int64") + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(startup) + feed_dict = {'image': image, 'label': label} + + train_exe = fluid.ParallelExecutor( + use_cuda=True, loss_name=loss.name, main_program=main) + + test_exe = fluid.ParallelExecutor( + use_cuda=True, + main_program=test_program, + share_vars_from=train_exe) + + for i in xrange(5): + test_loss, = test_exe.run([loss.name], feed_dict=feed_dict) + test_loss = numpy.array(test_loss) + + train_loss, = train_exe.run([loss.name], feed_dict=feed_dict) + train_loss = 
numpy.array(train_loss) + self.assertTrue(numpy.allclose(train_loss, test_loss)) diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py index 640264d82f0dc7fa71bf882d5549e30b87b8d7c5..24a0074d9b9621d902d12eb8cb29d9b65be22ed3 100644 --- a/python/paddle/fluid/tests/unittests/test_recordio_reader.py +++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py @@ -15,8 +15,8 @@ import unittest import paddle.fluid as fluid -import paddle -import paddle.dataset.mnist as mnist +import paddle.v2 as paddle +import paddle.v2.dataset.mnist as mnist class TestRecordIO(unittest.TestCase):