diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py
index 1e2185dfac1072d1f1046f4616a9d53a8fc76061..400200c4745017bd9d160bb9e415fde041c0a6c8 100644
--- a/benchmark/fluid/mnist.py
+++ b/benchmark/fluid/mnist.py
@@ -159,6 +159,7 @@ def run_benchmark(model, args):
         paddle.dataset.mnist.train(), batch_size=args.batch_size)
 
     accuracy = fluid.metrics.Accuracy()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     iters, num_samples, start_time = 0, 0, time.time()
     for pass_id in range(args.pass_num):
         accuracy.reset()
@@ -175,17 +176,20 @@ def run_benchmark(model, args):
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([len(y_data), 1])
 
-            outs = exe.run(
-                fluid.default_main_program(),
+            outs = train_exe.run(
                 feed={"pixel": img_data,
                       "label": y_data},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor]
+                fetch_list=[
+                    avg_cost.name, batch_acc.name, batch_size_tensor.name
+                ]
             )  # The accuracy is the accumulation of batches, but not the current batch.
-            accuracy.update(value=outs[1], weight=outs[2])
+            accuracy.update(
+                value=np.array(np.mean(outs[1])),
+                weight=np.mean(np.array(outs[2])))
             iters += 1
             num_samples += len(y_data)
-            loss = np.array(outs[0])
-            acc = np.array(outs[1])
+            loss = np.mean(np.array(outs[0]))
+            acc = np.mean(np.array(outs[1]))
             train_losses.append(loss)
             train_accs.append(acc)
             print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" %
diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py
index 831fa2c019fc2868cd85b1ca7b2c8c76a2f1628c..0fd7258a804e7c93b0b03da140140394bf90004a 100644
--- a/benchmark/fluid/resnet.py
+++ b/benchmark/fluid/resnet.py
@@ -241,6 +241,7 @@ def run_benchmark(model, args):
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
     accuracy = fluid.average.WeightedAverage()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     if args.use_fake_data:
         data = train_reader().next()
         image = np.array(map(lambda x: x[0].reshape(dshape), data)).astype(
@@ -264,14 +265,17 @@ def run_benchmark(model, args):
                 data)).astype('float32')
             label = np.array(map(lambda x: x[1], data)).astype('int64')
             label = label.reshape([-1, 1])
-            loss, acc, weight = exe.run(
-                fluid.default_main_program(),
+            loss, acc, weight = train_exe.run(
                 feed={'data': image,
                       'label': label},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor])
+                fetch_list=[
+                    avg_cost.name, batch_acc.name, batch_size_tensor.name
+                ])
             iters += 1
             num_samples += len(label)
-            accuracy.add(value=acc, weight=weight)
+            accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight))
+            loss = np.mean(np.array(loss))
+            acc = np.mean(np.array(acc))
             train_losses.append(loss)
             train_accs.append(acc)
             print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" %
diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py
index 53e34e0cbd15914791c305db6797f826ebfae34e..2a9566a45c3804183e05db9298cec4f670225a6f 100644
--- a/benchmark/fluid/vgg.py
+++ b/benchmark/fluid/vgg.py
@@ -169,6 +169,7 @@ def main():
 
     iters, num_samples, start_time = 0, 0, time.time()
     accuracy = fluid.average.WeightedAverage()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     for pass_id in range(args.pass_num):
         accuracy.reset()
         train_accs = []
@@ -184,14 +185,17 @@ def main():
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([-1, 1])
 
-            loss, acc, weight = exe.run(
-                fluid.default_main_program(),
+            loss, acc, weight = train_exe.run(
                 feed={"pixel": img_data,
                       "label": y_data},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor])
-            accuracy.add(value=acc, weight=weight)
+                fetch_list=[
+                    avg_cost.name, batch_acc.name, batch_size_tensor.name
+                ])
+            accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight))
             iters += 1
             num_samples += len(y_data)
+            loss = np.mean(np.array(loss))
+            acc = np.mean(np.array(acc))
             print(
                 "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" %
                 (pass_id, iters, loss, acc)
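Note: all three benchmark scripts above switch from fluid.Executor to fluid.ParallelExecutor. ParallelExecutor fetches variables by name and returns values gathered from every device, so the fetched loss and accuracy arrive as per-device arrays that must be reduced with np.mean before printing. A minimal sketch of the pattern (illustrative only; avg_cost, batch_acc, and the feed names follow the scripts above):

    import numpy as np
    import paddle.fluid as fluid

    # Build the program and run the startup program as before, then wrap the
    # main program in a ParallelExecutor that replicates it across devices.
    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)

    # Fetch by name; each fetched value concatenates the per-device results.
    loss, acc = train_exe.run(
        feed={"pixel": img_data, "label": y_data},
        fetch_list=[avg_cost.name, batch_acc.name])
    loss = np.mean(np.array(loss))  # reduce per-device losses to one scalar
    acc = np.mean(np.array(acc))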
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 0558393df6e3416be5e8ebd413025614779230ed..1144ca7f43b9ffd288b06672dbce82d5fae26347 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -156,4 +156,10 @@ copy(string_lib
   DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat
 )
 
+set(module "pybind")
+copy(pybind_lib
+  SRCS ${CMAKE_CURRENT_BINARY_DIR}/paddle/fluid/${module}/pybind.h
+  DSTS ${dst_dir}/${module}
+)
+
 add_custom_target(inference_lib_dist DEPENDS ${inference_lib_dist_dep})
diff --git a/doc/fluid/design/concepts/functions_operators_layers.md b/doc/fluid/design/concepts/functions_operators_layers.md
index 30bc488a18a28d349645d9d2502aae6691a69931..1f86b99e5197c3e0b85fd76fe704520ef21b06d3 100644
--- a/doc/fluid/design/concepts/functions_operators_layers.md
+++ b/doc/fluid/design/concepts/functions_operators_layers.md
@@ -40,7 +40,7 @@ template <typename T>
 class FCOp : public OperatorBase {
  public:
   void Run(...) {
-    add(mul(Input("X"), Input("W")), Input("b");
+    add(mul(Input("X"), Input("W")), Input("b"));
   }
 };
 REGISTER_OP(FCOp, "fc");
diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h
index fe1735d05dde5f09d5c72c68e5002d16f0083eb5..8f94206a87dbae8a81727ca48718886bbabbe25c 100644
--- a/paddle/fluid/framework/details/op_handle_base.h
+++ b/paddle/fluid/framework/details/op_handle_base.h
@@ -70,6 +70,14 @@ class OpHandleBase {
 
   const std::vector<VarHandleBase *> &Inputs() const { return inputs_; }
 
+  size_t NoDupInputSize() const {
+    std::unordered_set<VarHandleBase *> res;
+    for (auto *var : inputs_) {
+      res.emplace(var);
+    }
+    return res.size();
+  }
+
   const std::vector<VarHandleBase *> &Outputs() const { return outputs_; }
 
  protected:
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
index ef263d82c5ec93f0673eb0ac70e4fb02904bff13..815f739371e77d953a28be99b38ec1b8ff26506c 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -174,7 +174,7 @@ void ThreadedSSAGraphExecutor::InsertFetchOps(
 void ThreadedSSAGraphExecutor::InsertPendingOp(
     std::unordered_map<OpHandleBase *, size_t> *pending_ops,
     OpHandleBase *op_instance) const {
-  pending_ops->insert({op_instance, op_instance->Inputs().size()});
+  pending_ops->insert({op_instance, op_instance->NoDupInputSize()});
 }
 
 void ThreadedSSAGraphExecutor::InsertPendingVar(
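Note: the two scheduler changes above fix the pending-op countdown in ThreadedSSAGraphExecutor. An op that lists the same variable handle twice among its inputs would otherwise be seeded with a count larger than the number of ready signals it can ever receive, so it would never be scheduled. A toy sketch of the invariant (illustrative Python, not Paddle code; op_inputs, consumers, and run are hypothetical stand-ins):

    # Seed each op with the number of *distinct* input variables, because a
    # variable that becomes ready signals each of its consumers exactly once.
    pending = {op: len(set(op_inputs[op])) for op in ops}
    for var in ready_vars:
        for op in consumers[var]:
            pending[op] -= 1      # one decrement per distinct ready variable
            if pending[op] == 0:
                run(op)           # all inputs are now ready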
diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index abc9ebf472498f6653d5bb1113ae2f3ce7e5a923..1cd3ed9a00acead2599420f88499bd0d74c2974b 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -49,7 +49,7 @@ class OpConverter {
   // convert fluid block to tensorrt network
   void ConvertBlock(const framework::proto::BlockDesc& block,
                     TensorRTEngine* engine) {
-    for (size_t i = 0; i < block.ops_size(); i++) {
+    for (int i = 0; i < block.ops_size(); i++) {
       const auto& op = block.ops(i);
       OpConverter::Run(op, engine);
     }
   }
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index c44c5f164b2d84616e9a85813e0ee5219b41df28..622420c1c33a62994c81ad9534c4fa37a4a1fa1a 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -105,7 +105,7 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    auto in_dims = ctx->GetInputDim("X");
+    auto in_dims = ctx->GetInputDim("Diff");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
     PADDLE_ENFORCE_GE(out_dims.size(), 2,
@@ -127,12 +127,33 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
   }
 };
 
+class SmoothL1LossGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    auto* op = new framework::OpDesc();
+    op->SetType("smooth_l1_loss_grad");
+    op->SetInput("InsideWeight", Input("InsideWeight"));
+    op->SetInput("OutsideWeight", Input("OutsideWeight"));
+    op->SetInput("Diff", Output("Diff"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+
+    op->SetAttrMap(Attrs());
+
+    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    op->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
+    return std::unique_ptr<framework::OpDesc>(op);
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(smooth_l1_loss, ops::SmoothL1LossOp, ops::SmoothL1LossOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::SmoothL1LossGradMaker);
 REGISTER_OPERATOR(smooth_l1_loss_grad, ops::SmoothL1LossGradOp);
 REGISTER_OP_CPU_KERNEL(
     smooth_l1_loss,
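Note: SmoothL1LossGradMaker above shrinks the backward op's inputs to what the kernel actually reads. The forward pass already stores the element-wise difference in Diff, and the gradient is computable from Diff plus the two weight tensors, so X and Y no longer need to be kept alive for the backward pass; this is also why InferShape now queries the dims of "Diff" rather than "X". A numpy sketch of why Diff suffices (an illustration, not the kernel; sigma2 stands for the op's sigma-squared attribute and the weight handling is simplified):

    import numpy as np

    def smooth_l1_grad(diff, inside_weight, outside_weight, sigma2=1.0):
        # d/dz smooth_l1(z) = sigma2 * z  if |z| < 1 / sigma2, else sign(z)
        z = inside_weight * diff
        g = np.where(np.abs(z) < 1.0 / sigma2, sigma2 * z, np.sign(z))
        return inside_weight * outside_weight * g  # uses Diff, never X or Y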
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index cb0624eac4f55cbd4739b1dd10a4256da67dd85f..58a30ab3e522c5b14ac8c72d4b03667de6720425 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -20,19 +20,15 @@
 #=================================================
 
 function print_usage() {
-    RED='\033[0;31m'
-    BLUE='\033[0;34m'
-    BOLD='\033[1m'
-    NONE='\033[0m'
-
     echo -e "\n${RED}Usage${NONE}:
-    ${BOLD}$0${NONE} [OPTION]"
+    ${BOLD}${SCRIPT_NAME}${NONE} [OPTION]"
 
     echo -e "\n${RED}Options${NONE}:
     ${BLUE}build${NONE}: run build for x86 platform
     ${BLUE}build_android${NONE}: run build for android platform
     ${BLUE}build_ios${NONE}: run build for ios platform
     ${BLUE}test${NONE}: run all unit tests
+    ${BLUE}single_test${NONE}: run a single unit test
     ${BLUE}bind_test${NONE}: parallel tests bind to different GPU
     ${BLUE}doc${NONE}: generate paddle documents
     ${BLUE}html${NONE}: convert C++ source code into HTML
@@ -45,7 +41,15 @@ function print_usage() {
 }
 
 function init() {
+    RED='\033[0;31m'
+    BLUE='\033[0;34m'
+    BOLD='\033[1m'
+    NONE='\033[0m'
+
     PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
+    if [ -z "${SCRIPT_NAME}" ]; then
+        SCRIPT_NAME=$0
+    fi
 }
 
 function cmake_gen() {
@@ -309,6 +313,25 @@ EOF
     fi
 }
 
+function single_test() {
+    TEST_NAME=$1
+    if [ -z "${TEST_NAME}" ]; then
+        echo -e "${RED}Usage:${NONE}"
+        echo -e "${BOLD}${SCRIPT_NAME}${NONE} ${BLUE}single_test${NONE} [test_name]"
+        exit 1
+    fi
+    mkdir -p ${PADDLE_ROOT}/build
+    cd ${PADDLE_ROOT}/build
+    if [ ${WITH_TESTING:-ON} == "ON" ] ; then
+    cat <<EOF
[...]
-                ... < 0.01:
+                if float(cost) < 60.0:
                     if save_dirname is not None:
                         # TODO(liuyiqun): Change the target to crf_decode
                         fluid.io.save_inference_model(save_dirname, [
diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py
index 861dd3174a21d59fe12e0b794ecb2a934946ac71..ce3ba3ebc50d7b015f379b5e80b179463a7b231a 100644
--- a/python/paddle/fluid/tests/test_data_feeder.py
+++ b/python/paddle/fluid/tests/test_data_feeder.py
@@ -13,15 +13,62 @@
 # limitations under the License.
 
 import paddle.fluid as fluid
+import unittest
 
 
-def test_converter():
-    img = fluid.layers.data(name='image', shape=[1, 28, 28])
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
-    result = feeder.feed([[[0] * 784, [9]], [[1] * 784, [1]]])
-    print(result)
+class TestDataFeeder(unittest.TestCase):
+    def test_lod_level_0_converter(self):
+        img = fluid.layers.data(name='image', shape=[1, 28, 28])
+        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
+        result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
+        print(result)
+
+        self.assertEqual(result['image'].shape(), [2, 1, 28, 28])
+        self.assertEqual(result['label'].shape(), [2, 1])
+        self.assertEqual(result['image'].lod(), [])
+        self.assertEqual(result['label'].lod(), [])
+
+    def test_lod_level_1_converter(self):
+        # lod_level = 1
+        # each sentence has a different number of words
+        sentences = fluid.layers.data(
+            name='sentences', shape=[1], dtype='int64', lod_level=1)
+        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace())
+
+        # lod = [[0, 3, 5, 9]]
+        # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
+        # label = [1] * len(data)
+        result = feeder.feed(
+            [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])])
+        print(result)
+
+        self.assertEqual(result['sentences'].shape(), [9, 1])
+        self.assertEqual(result['label'].shape(), [3, 1])
+        self.assertEqual(result['sentences'].lod(), [[0, 3, 5, 9]])
+        self.assertEqual(result['label'].lod(), [])
+
+    def test_lod_level_2_converter(self):
+        # lod_level = 2
+        # paragraphs -> sentences -> words
+        paragraphs = fluid.layers.data(
+            name='paragraphs', shape=[1], dtype='int64', lod_level=2)
+        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace())
+
+        # lod = [[0, 2, 3], [0, 3, 5, 9]]
+        # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]]
+        # label = [1] * len(data)
+        result = feeder.feed(
+            [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])])
+        print(result)
+
+        self.assertEqual(result['paragraphs'].shape(), [9, 1])
+        self.assertEqual(result['label'].shape(), [2, 1])
+        self.assertEqual(result['paragraphs'].lod(), [[0, 2, 3], [0, 3, 5, 9]])
+        self.assertEqual(result['label'].lod(), [])
 
 
 if __name__ == '__main__':
-    test_converter()
+    unittest.main()
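Note: the rewritten feeder tests above pivot on LoD offsets. A lod such as [[0, 3, 5, 9]] marks sequence boundaries in the flattened tensor, so sequence i occupies rows lod[0][i] through lod[0][i+1]. A small sketch of the flattening DataFeeder performs for lod_level=1 inputs (an illustrative helper, not the library API):

    def flatten_level_1(sequences):
        # [[1, 2, 3], [4, 5], [6, 7, 8, 9]] -> ([1, ..., 9], [[0, 3, 5, 9]])
        flat, offsets = [], [0]
        for seq in sequences:
            flat.extend(seq)
            offsets.append(offsets[-1] + len(seq))
        return flat, [offsets]

    data, lod = flatten_level_1([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
    assert lod == [[0, 3, 5, 9]] and len(data) == 9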
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 496acc57912aacdf76f21aa88efb071d49aa0ce9..2ae9653953c2f5f6a399243bef2c7fb756f9692f 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -28,11 +28,11 @@ function(py_test_modules TARGET_NAME)
   if(WITH_TESTING)
     set(options "")
     set(oneValueArgs "")
-    set(multiValueArgs MODULES DEPS ARGS ENVS)
+    set(multiValueArgs MODULES DEPS ENVS)
     cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     add_test(NAME ${TARGET_NAME}
       COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_modules_ENVS}
-      ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS}
+      ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
       WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
   endif()
 endfunction()
diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py
index c24662ac2114c286b1c50286fea1b65cf7c1b3a8..a47af7ccb210fed86c52e0cd6ae0ab683284b2df 100644
--- a/python/paddle/fluid/trainer.py
+++ b/python/paddle/fluid/trainer.py
@@ -131,7 +131,40 @@ class Trainer(object):
             # load params from param_path into scope
             io.load_persistables(exe, dirname=param_path)
 
+    def _transpile_nccl2_dist(self):
+        # PADDLE_TRAINER_IPS
+        if "PADDLE_TRAINER_IPS" not in os.environ:
+            self.nccl_id_var = None
+        else:
+            self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
+            port = os.getenv("PADDLE_PSERVER_PORT")
+            worker_ips = os.getenv("PADDLE_TRAINER_IPS")
+            worker_endpoints = []
+            for ip in worker_ips.split(","):
+                worker_endpoints.append(':'.join([ip, port]))
+            self.num_trainers = len(worker_endpoints)
+            current_endpoint = os.getenv("POD_IP") + ":" + port
+            worker_endpoints.remove(current_endpoint)
+            # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
+            # in ParallelExecutor to start
+            # distributed training using NCCL2
+            self.nccl_id_var = self.startup_program.global_block().create_var(
+                name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
+            self.startup_program.global_block().append_op(
+                type="gen_nccl_id",
+                inputs={},
+                outputs={"NCCLID": self.nccl_id_var},
+                attrs={
+                    "endpoint": current_endpoint,
+                    "endpoint_list": worker_endpoints,
+                    "trainer_id": self.trainer_id
+                })
+
     def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
+        self._transpile_nccl2_dist()
+        if self.nccl_id_var != None:
+            return
+
         if "PADDLE_TRAINING_ROLE" not in os.environ:
             return
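Note: the NCCL2 path added to Trainer above is driven entirely by environment variables. It activates only when PADDLE_TRAINER_IPS is set; otherwise nccl_id_var stays None and _dist_transpile_if_necessary falls through to the existing pserver transpile path. A trainer process would be launched with settings along these lines (all values illustrative):

    import os

    os.environ["PADDLE_TRAINER_IPS"] = "192.168.0.2,192.168.0.3"  # all trainers
    os.environ["PADDLE_TRAINER_ID"] = "0"        # index of this trainer
    os.environ["PADDLE_PSERVER_PORT"] = "6174"   # port shared by all endpoints
    os.environ["POD_IP"] = "192.168.0.2"         # this trainer's own IP

Each trainer builds ip:port endpoint strings from the list, removes its own endpoint, and appends a gen_nccl_id op to the startup program to exchange the NCCL ID with its peers.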
+
+import unittest
+import os
+import sys
+import paddle.fluid as fluid
+import importlib
+import cStringIO
+
+
+def main():
+    sys.path.append(os.getcwd())
+    some_test_failed = False
+    for module_name in sys.argv[1:]:
+        buffer = cStringIO.StringIO()
+        main = fluid.Program()
+        startup = fluid.Program()
+        scope = fluid.core.Scope()
+        with fluid.program_guard(main, startup):
+            with fluid.scope_guard(scope):
+                with fluid.unique_name.guard():
+                    test_loader = unittest.TestLoader()
+                    module = importlib.import_module(module_name)
+                    tests = test_loader.loadTestsFromModule(module)
+                    res = unittest.TextTestRunner(stream=buffer).run(tests)
+                    if not res.wasSuccessful():
+                        some_test_failed = True
+                        print >> sys.stderr, module_name, 'failed\n', buffer.getvalue()
+
+    if some_test_failed:
+        exit(1)
+
+
+if __name__ == '__main__':
+    main()
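Note: tools/test_runner.py is what the CMake change above wires into py_test_modules. It runs each listed test module inside a fresh Program, Scope, and unique_name guard so global state cannot leak from one module into the next, buffers the runner output, and replays it on stderr only for modules that fail. Usage mirrors the generated add_test command (module names illustrative):

    python tools/test_runner.py test_data_feeder test_while_op

The script is Python 2 (print >> and cStringIO), matching the codebase at this point in its history.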