#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import logging
import os
import multiprocessing
import sys
import warnings
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .data_feeder import convert_dtype
from .framework import Program, default_main_program, Variable, convert_np_dtype_to_dtype_
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
from .trainer_factory import FetchHandlerMonitor

__all__ = ['Executor', 'global_scope', 'scope_guard']

g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig


def global_scope():
    """
    Get the global/default scope instance. There are a lot of APIs use
    :code:`global_scope` as its default value, e.g., :code:`Executor.run`

C
chengduo 已提交
45 46 47
    Returns:
        Scope: The global/default scope instance.

48 49 50 51 52 53 54 55
    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          numpy.array(fluid.global_scope().find_var("data").get_tensor())
Y
yuyang18 已提交
56
    """
Y
Yang Yu 已提交
57 58 59
    return g_scope


def _switch_scope(scope):
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex


@signature_safe_contextmanager
def scope_guard(scope):
    """
    This function switches scopes through a Python `with` statement.
    A scope records the mapping between variable names and variables
    ( :ref:`api_guide_Variable` ), similar to brackets in programming languages.
    If this function is not invoked, all variables and variable names are
    recorded in the default global scope. When users need to create variables
    with the same name, they should switch scopes through this function
    if they do not want the existing mapping of variables with that name
    to be overwritten. After switching through the `with` statement,
    all variables created in the `with` block are assigned to the new scope.

    Parameters:
        scope: The new scope.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                 fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(new_scope.find_var("data").get_tensor())
    """

    ex = _switch_scope(scope)
    yield
    _switch_scope(ex)


def as_numpy(tensor):
    """
    Convert a Tensor to a numpy.ndarray. Note that only Tensors without
    LoD information can be fully converted. For higher-dimensional
    sequence data, please use LoDTensor directly.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          new_scope = fluid.Scope()
          with fluid.scope_guard(new_scope):
              fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          tensor = new_scope.find_var("data").get_tensor()
          fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())

    Args:
       tensor(Variable): an instance of Tensor

    Returns:
        numpy.ndarray
    """
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    if tensor._is_initialized():
        return np.array(tensor)
    else:
        return None


def dtype_is_compatible_with(first, second):
    """
    Returns True if the first dtype is compatible with the second one.
    Currently, we require the two dtypes to be exactly the same.

    Args:
        first (np.dtype|VarType|str): The first data type: float32, int64, etc.
        second (np.dtype|VarType|str): The second data type to compare against.

    Returns:
        True if the two types are the same.
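
    Examples:
        A minimal sketch of the exact-match semantics (the values shown are
        illustrative, not taken from the original docs):

        .. code-block:: python

          import numpy as np
          # np.float32 and the string 'float32' convert to the same VarType.
          dtype_is_compatible_with(np.float32, 'float32')  # True
          dtype_is_compatible_with('float32', 'int64')     # False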
    """
    if not isinstance(first, core.VarDesc.VarType):
        first = convert_np_dtype_to_dtype_(first)
    if not isinstance(second, core.VarDesc.VarType):
        second = convert_np_dtype_to_dtype_(second)
    return first == second


def dimension_is_compatible_with(first, second):
    """
    Returns True if the two dimensions are compatible.

    A dimension is compatible with the other if:
    1. The lengths of the dimensions are the same.
    2. Each non-negative entry of the two dimensions is the same.
    3. A negative number or 'None' in a dimension means unknown, so it
       is compatible with any number.

    Args:
        first (list/tuple): integers representing a shape. "None" or a
            negative number means unknown.
        second (list/tuple): integers representing a shape. "None" or a
            negative number means unknown.

    Returns:
        True if the two dimensions are compatible.
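
    Examples:
        An illustrative sketch (the shapes are made up for demonstration):

        .. code-block:: python

          dimension_is_compatible_with([None, 3], [8, 3])  # True: None matches any
          dimension_is_compatible_with([-1, 3], [8, 4])    # False: 3 != 4
          dimension_is_compatible_with([2, 3], [2, 3, 1])  # False: different lengths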
    """

    dim_len = len(first)
    if dim_len != len(second):
        return False

    for i in range(dim_len):
        if first[i] is None or first[i] < 0:
            continue
        if second[i] is None or second[i] < 0:
            continue
        if first[i] != second[i]:
            return False

    return True


def check_feed_shape_type(var, feed):
    """
    Returns True if the variable doesn't require feed check or it is compatible
    with the shape and have same dtype as the feeded value.

    A dimension is compatible with the other if:
    1. The length of the dimensions are same.
    2. Each non-negative number of the two dimentions are same.
    3. For negative number or 'None' in a dimention, it means unknown so it
       is compatible with any number.
    
    Args:
        var (Variable): the Variable object
        feed (LoDTensor): the feeded value, which must be a LoDTensor
    Returns:
        True if the shape and dtype of variable is compatible with the feed value
    Raises:
        ValueError: if the shape or dtype of the variable is not compatible with
            the feed value
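
    Examples:
        A hedged sketch of the call pattern (``v`` and ``t`` are assumed to
        be a Variable and a fed LoDTensor prepared elsewhere):

        .. code-block:: python

          # Returns True when the shape/dtype match or no check is required;
          # raises ValueError on any mismatch.
          check_feed_shape_type(v, t)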
    """
    if var.desc.need_check_feed():
        if not dimension_is_compatible_with(feed.shape(), var.shape):
            raise ValueError(
                'The fed Variable %r should have dimensions = %d, shape = '
                '%r, but received fed shape %r' %
                (var.name, len(var.shape), var.shape, feed.shape()))
        if not dtype_is_compatible_with(feed._dtype(), var.dtype):
            var_dtype_format = convert_dtype(var.dtype) if isinstance(
                var.dtype, core.VarDesc.VarType) else var.dtype
            feed_dtype_format = convert_dtype(feed._dtype()) if isinstance(
                feed._dtype(), core.VarDesc.VarType) else feed._dtype()
            raise ValueError(
                'The data type of fed Variable %r must be %r, but received %r'
                % (var.name, var_dtype_format, feed_dtype_format))
    return True


def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
X
xuwei06 已提交
245 246
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
247 248 249
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
X
xuwei06 已提交
250
        A boolean value that indicates whether a block has feed operators
251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
        that match the info contained in feed_targets and feed_holder_name.
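
    Examples:
        A hedged sketch of the intended call pattern (``program`` and
        ``x_tensor`` are illustrative names, not from the original docs):

        .. code-block:: python

          # after Executor._add_feed_fetch_ops has prepended feed ops:
          block = program.global_block()
          has_feed_operators(block, {'X': x_tensor}, 'feed')  # True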
    """

    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            break
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0


def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.
X
xuwei06 已提交
273

274 275 276 277 278 279 280 281 282
    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
X
xuwei06 已提交
283 284 285
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.
286

X
xuwei06 已提交
287 288 289
    Return:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
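
    Examples:
        A hedged sketch of the intended call pattern (``program`` and
        ``loss_var`` are illustrative names, not from the original docs):

        .. code-block:: python

          # after Executor._add_feed_fetch_ops has appended fetch ops:
          block = program.global_block()
          has_fetch_operators(block, [loss_var], 'fetch')  # True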
    """

    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0


def _fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the
    given scope.

    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope you
            pass to Executor.run() when running your program.
            If None, global_scope() will be used. Default None.
        return_numpy(bool): whether to convert the tensor to a numpy.ndarray.
            Default True.

    Returns:
       LoDTensor|numpy.ndarray
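
    Examples:
        A hypothetical sketch (assuming a persistable variable named
        "fc_0.w_0" exists in the global scope after running a program):

        .. code-block:: python

          import paddle.fluid as fluid
          w = fluid.executor._fetch_var("fc_0.w_0")  # numpy.ndarray by default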
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor


def _to_name_str(var):
    if isinstance(var, Variable):
        return var.desc.name()
    elif isinstance(var, str):
        return var
    elif isinstance(var, six.string_types):
        return str(var)
    else:
        raise TypeError(str(var) + " should be Variable or str")


def _get_strong_program_cache_key(program, feed, fetch_list):
    return str(id(program)) + _get_program_cache_key(feed, fetch_list)


def _get_program_cache_key(feed, fetch_list):
    feed_var_names = list(feed.keys())
    fetch_var_names = list(map(_to_name_str, fetch_list))

    return str(feed_var_names + fetch_var_names)


def _as_lodtensor(data, place):
    """
    Convert numpy.ndarray to Tensor. Note that only ndarrays without LoD
    information are supported. For higher-dimensional sequence data,
    please use LoDTensor directly.

    Examples:
        >>> import numpy as np
        >>> import paddle.fluid as fluid
        >>> place = fluid.CPUPlace()
        >>> data = np.random.rand(100, 200, 300).astype('float32')
        >>> tensor = fluid.executor._as_lodtensor(data, place)

    Args:
        data(numpy.ndarray): an instance of ndarray

    Returns:
        LoDTensor
    """
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    # single tensor case
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor


class FetchHandler(object):
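    """
    Base class for fetching variables from a running trainer periodically.
    An instance can be passed as ``fetch_handler`` to ``train_from_dataset``
    or ``infer_from_dataset``; subclasses override ``handler`` to consume
    the fetched values (``help()`` prints a usage sketch).
    """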
    def __init__(self, var_dict=None, period_secs=60):
        assert var_dict is not None
        self.var_dict = var_dict
        self.period_secs = period_secs

    def handler(self, res_dict):
        for key in res_dict:
            if type(res_dict[key]) is np.ndarray:
                sys.stdout.write("{}[0]: {} ".format(key, res_dict[key][0]))
        sys.stdout.write("\n")

    @staticmethod
    def help():
        print("""
class FetchHandlerExample(FetchHandler):
    def handler(self, res_dict):
        print(res_dict["auc"])
        print("auc: {}, {}".format(res_dict["auc"], time.ctime()))

auc = Variable()
var_dict = {"auc": auc}
handler = FetchHandlerExample(var_dict=var_dict)
""")


class Executor(object):
    """
    An Executor in Python supports single/multiple-GPU running
    as well as single/multiple-CPU running. When constructing the Executor,
    the device must be specified.

    Args:
        place(fluid.CPUPlace()|fluid.CUDAPlace(n)): This parameter represents
            the device on which the executor runs.

    Returns:
        Executor

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle.fluid.compiler as compiler
          import numpy
          import os

          use_cuda = True
          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.data(name='X', shape=[None, 1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

          # Run the startup program once and only once.
          # No need to optimize/compile the startup program.
          startup_program.random_seed=1
          exe.run(startup_program)

          # Run the main program directly without compile.
          x = numpy.random.random(size=(10, 1)).astype('float32')
          loss_data, = exe.run(train_program,
                               feed={"X": x},
                               fetch_list=[loss.name])

          # Or, compile the program and then run it. See `CompiledProgram`
          # for more detail.
          # NOTE: If you run the program on CPU, you need to
          # specify CPU_NUM; otherwise, fluid will use all the
          # logical cores as CPU_NUM. In that case, the batch
          # size of the input should be greater than CPU_NUM;
          # if not, the process will fail with an exception.
          if not use_cuda:
              os.environ['CPU_NUM'] = str(2)

          compiled_prog = compiler.CompiledProgram(
              train_program).with_data_parallel(
              loss_name=loss.name)
          loss_data, = exe.run(compiled_prog,
                               feed={"X": x},
                               fetch_list=[loss.name])
    """

    def __init__(self, place):
        self.place = place
        self.program_caches = dict()
        self.ctx_caches = dict()
        self.scope_caches = dict()
        self.var_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        self._default_executor = core.Executor(p)
        self._closed = False

    def _get_scope_cache(self, program_cache_key):
        return self.scope_caches.get(program_cache_key, None)

    def _get_ctx_cache(self, program_cache_key):
        return self.ctx_caches.get(program_cache_key, None)

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_ctx_cache(self, ctx_cache_key, ctx):
        self.ctx_caches[ctx_cache_key] = ctx

    def _add_scope_cache(self, scope_cache_key, scope):
        self.scope_caches[scope_cache_key] = scope

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                global_block._prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch_operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program

    def _feed_data(self, program, feed, feed_var_name, scope):
        # feed var to framework
        global_block = program.global_block()
        for op in global_block.ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = _as_lodtensor(cur_feed, self.place)
                var = global_block.var(feed_target_name)
                check_feed_shape_type(var, cur_feed)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                break

    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in six.moves.range(len(fetch_list))
        ]
        return outs

    '''
    TODO(typhoonzero): Define "no longer use" meaning? Can user create
    a new Executor for the same program and run?
    TODO(panyx0718): Why ParallelExecutor doesn't have close?
    '''

    def close(self):
        """
        Close the executor. This interface is used for distributed training
        (PServers mode). This executor cannot be used after calling the
        interface, because this interface releases resources associated with
        the current Trainer.

        Returns:
            None

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid

              cpu = fluid.CPUPlace()
              exe = fluid.Executor(cpu)
              # execute training or testing
              exe.close()
        """
        if not self._closed:
            self._default_executor.close()
            self._closed = True

    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy):
        exe = program._executor
        # TODO(zhenghuihuang): quantization uses Graph in CompiledProgram
        # instead of program. We will add support for checking Vars in Graph
        need_check_feed = program._program is not None
        if need_check_feed:
            global_block = program._program.global_block()
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # Always set to CPU place, since the tensor needs to be
                    # split; it is fast on CPU.
                    assert isinstance(feed[feed_name], np.ndarray), \
                        "The input({}) should be numpy.ndarray, but not {}.".format(
                        feed_name, type(feed[feed_name]))
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                if need_check_feed:
                    var = global_block.var(feed_name)
                    check_feed_shape_type(var, feed_tensor)
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, (list, tuple)):
            if len(feed) != len(program._places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        assert isinstance(each[feed_name], np.ndarray), \
                            "The input({}) should be numpy.ndarray, but not {}.".format(
                            feed_name, type(each[feed_name]))
                        tmp.set(tensor, program._places[i])
                        tensor = tmp
                    if need_check_feed:
                        var = global_block.var(feed_name)
                        check_feed_shape_type(var, tensor)
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
        tensors = exe.run(fetch_var_names)._move_to_list()
        return as_numpy(tensors) if return_numpy else tensors

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True,
            use_program_cache=False):
        """
C
chengduo 已提交
681 682 683 684 685
        Run the specified :code:`Program` or :code:`CompiledProgram`. It should be noted that the executor
        will execute all the operators in :code:`Program` or :code:`CompiledProgram` without pruning some
        operators of the :code:`Program` or :code:`CompiledProgram` according to fetch_list. And you could
        specify the scope to store the :code:`Variables` during the executor running if the scope
        is not set, the executor will use the global scope, i.e. :code:`fluid.global_scope()`.
686

C
chengduo 已提交
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735
        Args:
            program(Program|CompiledProgram): This parameter represents the :code:`Program` or
                :code:`CompiledProgram` to be executed. If this parameter is not provided,
                the program will be set to :code:`fluid.default_main_program()`.
                The default is None.
            feed(list|dict): This parameter represents the input variables of the model.
                If it is single card training, the feed is dict type, and if it is multi-card
                training, the parameter feed can be dict or list type variable. If the
                parameter type is dict, the data in the feed will be split and sent to
                multiple devices (CPU/GPU), that is to say, the input data will be evenly
                sent to different devices, so you should make sure that the number of samples
                of the current mini-batch is greater than the number of places;
                if the parameter type is list, those data are copied directly to each device,
                so the length of this list should be equal to the number of places.
                The default is None.
            fetch_list(list): This parameter represents the variables that need to be returned
                after the model runs. The default is None.
            feed_var_name(str): This parameter represents the name of the input variable of
                the feed operator. The default is "feed".
            fetch_var_name(str): This parameter represents the name of the output variable of
                the fetch operator. The default is "fetch".
            scope(Scope): the scope used to run this program. You can switch
                it to a different scope. Default is :code:`fluid.global_scope()`
            return_numpy(bool): This parameter indicates whether to convert the fetched variables
                (the variables specified in the fetch list) to numpy.ndarray. If it is False,
                the type of the return value is a list of :code:`LoDTensor`. The default is True.
            use_program_cache(bool): This parameter indicates whether the input :code:`Program` is cached.
                If the parameter is True, the model may run faster in the following cases:
                the input program is :code:`fluid.Program`, and the parameters(program, feed variable name
                and fetch_list variable) of this interface remains unchanged during running.
                The default is False.
                
        Returns:

            List: The fetched result list.

        NOTES:
            1. If it is multi-card running and the feed parameter is dict type, the input data
               will be evenly sent to different cards. For example, using two GPUs to run the model,
               the input sample number is 3, that is, [0, 1, 2], the sample number on GPU0 is 1,
               that is, [0], and the sample number on GPU1 is 2, that is, [1, 2].
               If the number of samples is less than the number of devices, the program will
               throw an exception. When running the model, you should make sure that the
               number of samples in the last batch of the dataset is not smaller than the
               number of CPU cores or GPU cards; if it is, it is recommended to
               discard the last batch.
            2. If the number of CPU cores or GPU cards available is greater than 1, the fetch
               results are spliced together in dimension 0 for the same variable values
               (variables in fetch_list) on different devices.
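
            For illustration, a hedged sketch of the list-type feed on two
            devices (the variable names below are made up, not from the docs):

            .. code-block:: python

                # Each dict feeds one device; the list length must equal
                # the number of places of the compiled program.
                exe.run(compiled_prog,
                        feed=[{'X': x_part0}, {'X': x_part1}],
                        fetch_list=[loss.name])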

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              import numpy

              # First create the Executor.
              place = fluid.CPUPlace() # fluid.CUDAPlace(0)
              exe = fluid.Executor(place)

              data = fluid.data(name='X', shape=[None, 1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              adam = fluid.optimizer.Adam()
              adam.minimize(loss)

              # Run the startup program once and only once.
              exe.run(fluid.default_startup_program())

              x = numpy.random.random(size=(10, 1)).astype('float32')
              outs = exe.run(feed={'X': x},
                             fetch_list=[loss.name])
        """
        try:
            return self._run_impl(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)
        except Exception as e:
            if not isinstance(e, core.EOFException):
                warnings.warn(
                    "The following exception is not an EOF exception.")
            six.reraise(*sys.exc_info())

    def _run_impl(self, program, feed, fetch_list, feed_var_name,
                  fetch_var_name, scope, return_numpy, use_program_cache):
        if self._closed:
            raise RuntimeError("Attempted to use a closed Executor")

        use_default_main_program = program is None
        if program is None:
            program = default_main_program()
        if isinstance(program, Program) and \
                        len(program.global_block().ops) == 0:
            error_info = "The current program is empty."
            if use_default_main_program:
                error_info += " Maybe you should pass the Program or the CompiledProgram manually."
            warnings.warn(error_info)
        if scope is None:
            scope = global_scope()

        if fetch_list is not None:
            if isinstance(fetch_list, Variable) or isinstance(fetch_list, str):
                fetch_list = [fetch_list]
            assert isinstance(fetch_list, tuple) or isinstance(fetch_list, list), \
                "Currently , The fetch_list type only should be list or tuple, \n"\
                "but the input type is {}. For more information please refer to \n"\
                "the executor.run(...).".format(type(fetch_list))
        else:
            fetch_list = []

        compiled = isinstance(program, compiler.CompiledProgram)

        # For backward compatibility, run directly.
        if not compiled:
            return self._run_program(
                program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)

        program._compile(scope, self.place)
        if program._is_inference:
            return self._run_inference(program._executor, feed)
        else:
            return self._run_parallel(
                program,
                scope=scope,
                feed=feed,
                fetch_list=fetch_list,
                fetch_var_name=fetch_var_name,
                return_numpy=return_numpy)

    def _run_program(self, program, feed, fetch_list, feed_var_name,
                     fetch_var_name, scope, return_numpy, use_program_cache):

        if feed is None:
            feed = {}
        elif isinstance(feed, (list, tuple)):
            assert len(feed) == 1, "Not compiled with data parallel"
            feed = feed[0]

        if not isinstance(feed, dict):
            raise TypeError(
                "feed requires dict as its Parameter. But you passed in %s" %
                (type(feed)))

        assert program is not None, "The program should not be empty"
        if not isinstance(program, Program):
            raise TypeError(
                "Executor requires Program as its Parameter. But you passed in %s"
                % (type(program)))
        if use_program_cache:
            cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
            cached_program = self._get_program_cache(cache_key)
            cached_ctx = self._get_ctx_cache(cache_key)
            cached_scope = self._get_scope_cache(cache_key)
            if cached_program is None:
                cached_program = self._add_feed_fetch_ops(
                    program=program,
                    feed=feed,
                    fetch_list=fetch_list,
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
                fetch_list_str = list(map(_to_name_str, fetch_list))
                cached_ctx = self._default_executor.prepare(
                    cached_program.desc, 0, fetch_list_str, False)
                # Currently, we cache the program, vars, and sub_scope here.
                # We suppose that in a life cycle of training, a user will not
                # create many programs, so the basic rule of caching is to
                # cache all unseen (program, var, scope) tuples when a user
                # uses use_program_cache.
                cached_scope = scope.new_scope()
                self._default_executor.create_variables(cached_program.desc,
                                                        cached_scope, 0)
                self._add_ctx_cache(cache_key, cached_ctx)
                self._add_scope_cache(cache_key, cached_scope)
            program = cached_program
            ctx = cached_ctx
            scope = cached_scope
        else:
            program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        if not use_program_cache:
            self._default_executor.run(program.desc, scope, 0, True, True,
                                       fetch_var_name)
        else:
            self._default_executor.run_prepared_ctx(ctx, scope, False, False,
                                                    False)
        arr = scope.find_var(fetch_var_name).get_lod_tensor_array()
        tensors = arr._move_to_list()
        if return_numpy:
            return as_numpy(tensors)
        else:
            return tensors
    def _run_inference(self, exe, feed):
        return exe.run(feed)
    def _dump_debug_info(self, program=None, trainer=None):
        with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
            fout.write(str(trainer))
        if program._fleet_opt:
            with open("fleet_desc.prototxt", "w") as fout:
                fout.write(str(program._fleet_opt["fleet_desc"]))

    def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
        filelist_length = len(dataset.dataset.get_filelist())
        if filelist_length < pipeline_num:
            pipeline_num = filelist_length
            print(
                "Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
                % (filelist_length, filelist_length))
        if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
            print(
                "Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
                % (filelist_length // pipeline_num, filelist_length))
            pipeline_opt["concurrency_list"][
                0] = filelist_length // pipeline_num
        dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
        return pipeline_num

    def _prepare_trainer(self,
                         program=None,
                         dataset=None,
                         scope=None,
                         thread=0,
                         debug=False,
                         fetch_list=None,
                         fetch_info=None,
                         print_period=100):
        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []
        if fetch_info is None:
            fetch_info = []
        assert len(fetch_list) == len(fetch_info)
        compiled = isinstance(program, compiler.CompiledProgram)
        if not compiled:
            # TODO: Need a better way to distinguish and specify different execution mode
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(program._fleet_opt)
            trainer._set_program(program)
        else:
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program.program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(
                    program.program._fleet_opt)
            trainer._set_program(program.program)

        if thread <= 0:
            if dataset.thread_num <= 0:
                raise RuntimeError(
                    "You should set the thread num first, either in Dataset "
                    "or in Executor.train_from_dataset")
            else:
                trainer._set_thread(dataset.thread_num)
        else:
            trainer._set_thread(thread)

        trainer._set_debug(debug)
        trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
        return scope, trainer

    def _run_from_dataset(self,
                          program=None,
                          dataset=None,
                          scope=None,
                          thread=0,
                          is_infer=False,
                          debug=False,
                          fetch_list=None,
                          fetch_info=None,
                          print_period=100,
                          fetch_handler=None):
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")

        if program._pipeline_opt is not None and program._pipeline_opt[
                "sync_steps"] != -1:
            # hack for paddlebox: sync_steps(-1) denotes paddlebox
            thread = self._adjust_pipeline_resource(program._pipeline_opt,
                                                    dataset, thread)

        dataset._prepare_to_run()

        scope, trainer = self._prepare_trainer(
            program=program,
            dataset=dataset,
            scope=scope,
            thread=thread,
            debug=debug,
            fetch_list=fetch_list,
            fetch_info=fetch_info,
            print_period=print_period)

        trainer._set_infer(is_infer)
        trainer._gen_trainer_desc()

        self._dump_debug_info(program=program, trainer=trainer)
        dataset._dynamic_adjust_before_train(trainer.proto_desc.thread_num)

        trainer_instance = self._default_executor.init_for_dataset(
            program.desc, trainer._desc(), scope, dataset.dataset)

        if fetch_handler is not None:
            scope0 = trainer_instance.get_worker_scope(0)
            fetch_monitor = FetchHandlerMonitor(scope0, fetch_handler)
            fetch_monitor.start()
            self._default_executor.run_from_dataset(trainer_instance)
            fetch_monitor.stop()
            self._default_executor.release_trainer(trainer_instance)
        else:
            self._default_executor.run_from_dataset(trainer_instance)
            self._default_executor.release_trainer(trainer_instance)

        dataset._dynamic_adjust_after_train()
        dataset._finish_to_run()

        return None

    def infer_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100,
                           fetch_handler=None):
        """
        Infer from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
        Given a program (either a plain program or a compiled program), infer_from_dataset
        will consume all data samples in the dataset. The input scope can be given by users.
        By default, scope is global_scope(). The total number of threads run in inference
        is `thread`. The thread number used in inference will be the minimum of the thread
        num in Dataset and the value of thread in this interface. Debug can be set so that
        the executor will display the run time of all operators and the throughput of the
        current inference task.

        The document of infer_from_dataset is almost the same as train_from_dataset,
        except that in distributed training, pushing gradients is disabled in
        infer_from_dataset. infer_from_dataset() can be used for evaluation in
        multi-thread mode very easily.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
                if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
                a user should provide a well-defined dataset before calling this function.
                Please check the document of Dataset if needed. default is None
            scope(Scope): the scope used to run this program, you can switch it to a
                different scope for each run. default is global_scope
            thread(int): number of threads a user wants to run in this function. Default
                is 0, which means using the thread num of the dataset
            debug(bool): whether a user wants to run infer_from_dataset in debug mode,
                default is False
            fetch_list(Variable List): fetch variable list, each variable will be printed
                during inference, default is None
            fetch_info(String List): print information for each variable, default is None
            print_period(int): the number of mini-batches for each print, default is 100
            fetch_handler(FetchHandler): a user-defined class for handling fetched
                output, default is None

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.fluid as fluid

                place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
                exe = fluid.Executor(place)
                x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
                y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
                dataset = fluid.DatasetFactory().create_dataset()
                dataset.set_use_var([x, y])
                dataset.set_thread(1)
                filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
                dataset.set_filelist(filelist)
                exe.run(fluid.default_startup_program())
                exe.infer_from_dataset(program=fluid.default_main_program(),
                                       dataset=dataset)

        """
        return self._run_from_dataset(program, dataset, scope, thread, True,
                                      debug, fetch_list, fetch_info,
                                      print_period, fetch_handler)

    def train_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100,
                           fetch_handler=None):
        """
        Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
        Given a program (either a plain program or a compiled program), train_from_dataset
        will consume all data samples in the dataset. The input scope can be given by users.
        By default, scope is global_scope(). The total number of threads run in training
        is `thread`. The thread number used in training will be the minimum of the thread
        num in Dataset and the value of thread in this interface. Debug can be set so that
        the executor will display the run time of all operators and the throughput of the
        current training task.

        Note: train_from_dataset will destroy all resources created within the executor
        for each run.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
                if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
                a user should provide a well-defined dataset before calling this function.
                Please check the document of Dataset if needed.
            scope(Scope): the scope used to run this program, you can switch it to a
                different scope for each run. default is global_scope
            thread(int): number of threads a user wants to run in this function. Default
                is 0, which means using the thread num of the dataset
            debug(bool): whether a user wants to run train_from_dataset in debug mode
            fetch_list(Variable List): fetch variable list, each variable will be printed
                during training
            fetch_info(String List): print information for each variable, its length should
                be equal to fetch_list
            print_period(int): the number of mini-batches for each print, default is 100
            fetch_handler(FetchHandler): a user-defined class for handling fetched
                output, default is None

        Returns:
            None

        Examples:

            .. code-block:: python

              import paddle.fluid as fluid

              place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
              exe = fluid.Executor(place)
              x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
              y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
              dataset = fluid.DatasetFactory().create_dataset()
              dataset.set_use_var([x, y])
              dataset.set_thread(1)
              filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
              dataset.set_filelist(filelist)
              exe.run(fluid.default_startup_program())
              exe.train_from_dataset(program=fluid.default_main_program(),
                                     dataset=dataset)

        """
        return self._run_from_dataset(program, dataset, scope, thread, False,
                                      debug, fetch_list, fetch_info,
                                      print_period, fetch_handler)