diff --git a/benchmark/fluid/fluid_benchmark.py b/benchmark/fluid/fluid_benchmark.py
index 8e6bfcbd3017a60ab5d5b4dcdbe313b1091a07ca..49f26255f315c3c368f42b367dfc6487ffa0deb5 100644
--- a/benchmark/fluid/fluid_benchmark.py
+++ b/benchmark/fluid/fluid_benchmark.py
@@ -69,6 +69,11 @@ def parse_args():
         type=int,
         default=1,
         help='If gpus > 1, will use ParallelExecutor to run, else use Executor.')
+    parser.add_argument(
+        '--cpus',
+        type=int,
+        default=1,
+        help='If cpus > 1, will use ParallelDo to run, else use Executor.')
     parser.add_argument(
         '--data_set',
         type=str,
diff --git a/benchmark/fluid/models/mnist.py b/benchmark/fluid/models/mnist.py
index d264bfc12bdb159c06dae81db4949b9ee17268e2..28a38a931cf6cfcd5dd858b363b3d29b70368315 100644
--- a/benchmark/fluid/models/mnist.py
+++ b/benchmark/fluid/models/mnist.py
@@ -69,15 +69,30 @@ def get_model(args):
     images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')

-    # Train program
-    predict = cnn_model(images)
-    cost = fluid.layers.cross_entropy(input=predict, label=label)
-    avg_cost = fluid.layers.mean(x=cost)
-
-    # Evaluator
-    batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
-    batch_acc = fluid.layers.accuracy(
-        input=predict, label=label, total=batch_size_tensor)
+    if args.device == 'CPU' and args.cpus > 1:
+        places = fluid.layers.get_places(args.cpus)
+        pd = fluid.layers.ParallelDo(places)
+        with pd.do():
+            predict = cnn_model(pd.read_input(images))
+            label = pd.read_input(label)
+            cost = fluid.layers.cross_entropy(input=predict, label=label)
+            avg_cost = fluid.layers.mean(x=cost)
+            batch_acc = fluid.layers.accuracy(input=predict, label=label)
+
+            pd.write_output(avg_cost)
+            pd.write_output(batch_acc)
+
+        avg_cost, batch_acc = pd()
+        avg_cost = fluid.layers.mean(avg_cost)
+        batch_acc = fluid.layers.mean(batch_acc)
+    else:
+        # Train program
+        predict = cnn_model(images)
+        cost = fluid.layers.cross_entropy(input=predict, label=label)
+        avg_cost = fluid.layers.mean(x=cost)
+
+        # Evaluator
+        batch_acc = fluid.layers.accuracy(input=predict, label=label)

     # inference program
     inference_program = fluid.default_main_program().clone()
diff --git a/benchmark/fluid/models/resnet.py b/benchmark/fluid/models/resnet.py
index 9dec8911ed64e09285fb461c4a12adb601535316..f951f73a35dc4dc6f796178ebbc3e2886b2d7d8c 100644
--- a/benchmark/fluid/models/resnet.py
+++ b/benchmark/fluid/models/resnet.py
@@ -132,18 +132,33 @@ def get_model(args):
     input = fluid.layers.data(name='data', shape=dshape, dtype='float32')
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    predict = model(input, class_dim)
-    cost = fluid.layers.cross_entropy(input=predict, label=label)
-    avg_cost = fluid.layers.mean(x=cost)
-    batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
-    batch_acc = fluid.layers.accuracy(
-        input=predict, label=label, total=batch_size_tensor)
+    if args.device == 'CPU' and args.cpus > 1:
+        places = fluid.layers.get_places(args.cpus)
+        pd = fluid.layers.ParallelDo(places)
+        with pd.do():
+            predict = model(pd.read_input(input), class_dim)
+            label = pd.read_input(label)
+            cost = fluid.layers.cross_entropy(input=predict, label=label)
+            avg_cost = fluid.layers.mean(x=cost)
+            batch_acc = fluid.layers.accuracy(input=predict, label=label)
+
+            pd.write_output(avg_cost)
+            pd.write_output(batch_acc)
+
+        avg_cost, batch_acc = pd()
+        avg_cost = fluid.layers.mean(avg_cost)
+        batch_acc = fluid.layers.mean(batch_acc)
+    else:
+        predict = model(input, class_dim)
+        cost = fluid.layers.cross_entropy(input=predict, label=label)
+        avg_cost = fluid.layers.mean(x=cost)
+        batch_acc = fluid.layers.accuracy(input=predict, label=label)

     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
         inference_program = fluid.io.get_inference_program(
-            target_vars=[batch_acc, batch_size_tensor])
+            target_vars=[batch_acc])

     optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
diff --git a/benchmark/fluid/models/stacked_dynamic_lstm.py b/benchmark/fluid/models/stacked_dynamic_lstm.py
index 81a28b5f3aed0c325398b909d700c23df545824a..1b680d76a8ba1ead7c8c50065e1817c45b951b27 100644
--- a/benchmark/fluid/models/stacked_dynamic_lstm.py
+++ b/benchmark/fluid/models/stacked_dynamic_lstm.py
@@ -101,9 +101,8 @@ def get_model(args):
     loss = fluid.layers.mean(x=loss)

     # add acc
-    batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
     batch_acc = fluid.layers.accuracy(input=logit, label=fluid.layers.data(name='label', \
-        shape=[1], dtype='int64'), total=batch_size_tensor)
+        shape=[1], dtype='int64'))

     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
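
For reference, below is a minimal, self-contained sketch of the ParallelDo pattern this patch applies in mnist.py and resnet.py: the batch is split across the CPU places returned by get_places, each place runs the forward pass on its slice, and the per-place cost/accuracy outputs are gathered and averaged. The tiny fully-connected simple_net and the hard-coded cpus value are illustrative stand-ins, not code from the benchmark files.

    # Illustrative sketch only; simple_net and cpus are placeholders for
    # cnn_model/resnet and args.cpus in the real benchmark scripts.
    import paddle.fluid as fluid

    def simple_net(images, label):
        # hypothetical one-layer classifier standing in for the real models
        predict = fluid.layers.fc(input=images, size=10, act='softmax')
        cost = fluid.layers.cross_entropy(input=predict, label=label)
        avg_cost = fluid.layers.mean(x=cost)
        batch_acc = fluid.layers.accuracy(input=predict, label=label)
        return avg_cost, batch_acc

    images = fluid.layers.data(name='pixel', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    cpus = 4  # would come from the new --cpus flag
    places = fluid.layers.get_places(cpus)
    pd = fluid.layers.ParallelDo(places)
    with pd.do():
        # each place sees only its slice of the input batch
        avg_cost, batch_acc = simple_net(
            pd.read_input(images), pd.read_input(label))
        pd.write_output(avg_cost)
        pd.write_output(batch_acc)

    # pd() gathers the per-place outputs; mean() reduces them to scalars
    avg_cost, batch_acc = pd()
    avg_cost = fluid.layers.mean(avg_cost)
    batch_acc = fluid.layers.mean(batch_acc)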