Unverified commit c9e5c1e4, authored by qingqing01, committed via GitHub

Hidden ParallelDo. (#13454)

Parent commit: 6757a315
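This commit removes ParallelDo from the public paddle.fluid.layers namespace: its entries disappear from API.spec and from the layers __all__ list, and all in-tree tests switch to importing the class from its actual home, paddle.fluid.layers.control_flow. A minimal migration sketch implied by the hunks below (not a snippet from the patch itself):

    import paddle.fluid as fluid
    from paddle.fluid.layers.device import get_places
    # Old spelling, removed by this commit:
    #   pd = fluid.layers.ParallelDo(places)
    # New spelling:
    from paddle.fluid.layers.control_flow import ParallelDo

    places = get_places()    # one place per visible device (or CPU thread)
    pd = ParallelDo(places)  # signature per API.spec: (places, use_nccl=False, name=None)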
@@ -237,12 +237,6 @@ paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None))
-paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ParallelDo.read_input ArgSpec(args=['self', 'var'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ParallelDo.write_output ArgSpec(args=['self', 'var'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Print ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, -1, True, True, True, True, 'both'))
 paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
...
@@ -41,7 +41,6 @@ __all__ = [
     'DynamicRNN',
     'StaticRNN',
     'reorder_lod_tensor_by_rank',
-    'ParallelDo',
     'Print',
     'is_empty',
 ]
@@ -259,7 +258,7 @@ class ParallelDo(object):
         # ParallelDo version & Single-thread version
         if thread_num > 1:
             places = fluid.layers.get_places(thread_num)
-            pd = fluid.layers.ParallelDo(places)
+            pd = fluid.layers.control_flow.ParallelDo(places)
             with pd.do():
                 images = pd.read_input(images)
                 label = pd.read_input(label)
...
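The docstring hunk above only renames the constructor; for context, here is a self-contained sketch of the whole ParallelDo pattern it illustrates, assuming the methods listed in the removed API.spec entries and the pd() call that the fluid tests use to collect values passed to write_output. The network layers are illustrative, not part of the patch:

    import paddle.fluid as fluid
    from paddle.fluid.layers.control_flow import ParallelDo
    from paddle.fluid.layers.device import get_places

    images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    places = get_places(device_count=4, device_type='CPU')
    pd = ParallelDo(places)
    with pd.do():
        img_ = pd.read_input(images)   # per-place slice of the batch
        label_ = pd.read_input(label)
        prediction = fluid.layers.fc(input=img_, size=10, act='softmax')
        loss = fluid.layers.cross_entropy(input=prediction, label=label_)
        pd.write_output(fluid.layers.mean(loss))  # gathered across places
    avg_loss = fluid.layers.mean(pd())  # average the per-place means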
@@ -15,6 +15,7 @@
 from __future__ import print_function
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 import unittest
 import paddle.fluid as fluid
 import paddle
@@ -147,7 +148,7 @@ def train(word_dict,
             data, label, input_dim=dict_dim, class_dim=class_dim)
     else:
         places = get_places()
-        pd = fluid.layers.ParallelDo(places)
+        pd = ParallelDo(places)
         with pd.do():
             cost, acc, _ = net_method(
                 pd.read_input(data),
...
@@ -25,6 +25,7 @@ import numpy
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 
 BATCH_SIZE = 64
@@ -81,7 +82,7 @@ def train(nn_type,
     if parallel:
         places = get_places()
-        pd = fluid.layers.ParallelDo(places)
+        pd = ParallelDo(places)
         with pd.do():
             img_ = pd.read_input(img)
             label_ = pd.read_input(label)
...
@@ -17,6 +17,7 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 import unittest
 import os
 import numpy as np
@@ -84,7 +85,7 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
             [first_word, second_word, third_word, forth_word, next_word])
     else:
         places = get_places()
-        pd = fluid.layers.ParallelDo(places)
+        pd = ParallelDo(places)
         with pd.do():
             avg_cost, predict_word = __network__(
                 list(
...
@@ -20,6 +20,7 @@ import sys
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 
 # need to fix random seed and training data to compare the loss
 # value accurately calculated by the default and the memory optimization
@@ -38,7 +39,7 @@ if fluid.core.is_compiled_with_cuda():
     place = fluid.CUDAPlace(0)
 
 places = get_places(device_count=0, device_type=device_type)
-pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
+pd = ParallelDo(places, use_nccl=use_nccl)
 with pd.do():
     x_ = pd.read_input(x)
     y_ = pd.read_input(y)
...
@@ -18,6 +18,7 @@ import unittest
 
 import paddle.fluid as fluid
 from paddle.fluid.layers.device import get_places
+from paddle.fluid.layers.control_flow import ParallelDo
 import paddle.fluid.profiler as profiler
 import numpy
 import six
@@ -120,7 +121,7 @@ class BaseParallelForTest(unittest.TestCase):
             thread_num = fluid.core.get_cuda_device_count(
             ) if use_gpu else 8
             places = get_places(thread_num)
-            pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
+            pd = ParallelDo(places, use_nccl=use_nccl)
 
             data = next(generator)
             if isinstance(data, fluid.framework.Variable):
...
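The last two files also exercise the use_nccl flag. Given the constructor signature recorded in the removed API.spec entry, ParallelDo(places, use_nccl=False, name=None), a multi-GPU variant of the same pattern would look roughly like this (a sketch assuming a CUDA build; device_count=0 requests all visible devices, as in the hunk above):

    import paddle.fluid as fluid
    from paddle.fluid.layers.control_flow import ParallelDo
    from paddle.fluid.layers.device import get_places

    if fluid.core.is_compiled_with_cuda():
        places = get_places(device_count=0, device_type='CUDA')
        pd = ParallelDo(places, use_nccl=True)  # aggregate gradients with NCCL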