#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import logging
import os
import multiprocessing
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory

__all__ = ['Executor', 'global_scope', 'scope_guard']

g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig


def global_scope():
    """
    Get the global/default scope instance. Many APIs use
    :code:`global_scope` as their default value, e.g., :code:`Executor.run`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          numpy.array(fluid.global_scope().find_var("data").get_tensor())

    Returns:
        Scope: The global/default scope instance.
    """
    return g_scope


def _switch_scope(scope):
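    """Swap the global/default scope to `scope` and return the previous scope."""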
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex


@signature_safe_contextmanager
def scope_guard(scope):
    """
    Switch the global/default scope instance via a Python `with` statement. All
    variables created at runtime will be assigned to the new scope.

    Args:
        scope: The new global/default scope.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(new_scope.find_var("data").get_tensor())
    """

    ex = _switch_scope(scope)
    try:
        yield
    finally:
        _switch_scope(ex)


def as_numpy(tensor):
    """
    Convert a Tensor to a numpy.ndarray. This only supports Tensors without
    LoD information. For higher-dimensional sequence data, please use LoDTensor directly.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          new_scope = fluid.Scope()
          with fluid.scope_guard(new_scope):
              fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          tensor = new_scope.find_var("data").get_tensor()
          fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())

    Args:
       tensor(Variable): an instance of Tensor

    Returns:
        numpy.ndarray
    """
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    if tensor._is_initialized():
        return np.array(tensor)
    else:
        return None


def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has feed operators
        that match the info contained in feed_targets and feed_holder_name.
    """

    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            break
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0


def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.

    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
    """

    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0


def _fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the
    given scope.

    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope that
            you pass to Executor.run() when running your program.
            If None, global_scope() will be used. Default None.
        return_numpy(bool): whether to convert the tensor to numpy.ndarray.
            Default True.
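
    Examples:
        A minimal sketch (the variable name "fc_0.w_0" below is illustrative
        only; it assumes such a persistable variable exists in the global
        scope after a run):

        .. code-block:: python

          import paddle.fluid as fluid

          w = fluid.executor._fetch_var("fc_0.w_0")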

    Returns:
       LoDTensor|numpy.ndarray
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor


def _to_name_str(var):
    if isinstance(var, Variable):
        return var.desc.name()
    elif isinstance(var, str):
        return var
    elif isinstance(var, six.string_types):
        return str(var)
    else:
        raise TypeError(str(var) + " should be Variable or str")


def _get_strong_program_cache_key(program, feed, fetch_list):
    return str(id(program)) + _get_program_cache_key(feed, fetch_list)


def _get_program_cache_key(feed, fetch_list):
    feed_var_names = list(feed.keys())
    fetch_var_names = list(map(_to_name_str, fetch_list))

    return str(feed_var_names + fetch_var_names)


def _as_lodtensor(data, place):
    """
        Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
        For higher dimensional sequence data, please use LoDTensor directly.

        Examples:
            >>> import paddle.fluid as fluid
            >>> place = fluid.CPUPlace()
            >>> exe = fluid.executor(place)
            >>> data = np.array(size=(100, 200, 300))
            >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
            >>>     ...

        Args:
            data(numpy.ndarray): a instance of array

        Returns:
            LoDTensor
        """
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    # single tensor case
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor


class Executor(object):
    """
    An Executor in Python supports single/multiple-GPU running
    as well as single/multiple-CPU running. The Python executor takes a program,
    adds feed operators and fetch operators to this program according
    to the feed map and fetch_list. The feed map provides input data for the
    program. fetch_list provides the variables (or names) that the user wants
    to get after the program runs. Note: the executor will run all operators
    in the program, not only the operators that the fetch_list depends on.
    It stores the global variables into the global scope, and creates a
    local scope for the temporary variables. The contents in the local scope
    may be discarded after every minibatch forward/backward pass finishes.
    But the global scope variables will persist across different runs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle.fluid.compiler as compiler
          import numpy
          import os

          use_cuda = True
          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

          # Run the startup program once and only once.
          # No need to optimize/compile the startup program.
          startup_program.random_seed = 1
          exe.run(startup_program)

          # Run the main program directly without compile.
          x = numpy.random.random(size=(10, 1)).astype('float32')
          loss_data, = exe.run(train_program,
                               feed={"X": x},
                               fetch_list=[loss.name])

          # Or, compile the program and run it. See `CompiledProgram`
          # for more details.
          # NOTE: If you use CPU to run the program, you need
          # to specify CPU_NUM; otherwise, fluid will use all the
          # logical cores as CPU_NUM. In that case, the batch size
          # of the input should be greater than CPU_NUM; if not,
          # the process will fail with an exception.
          if not use_cuda:
              os.environ['CPU_NUM'] = str(2)

          compiled_prog = compiler.CompiledProgram(
              train_program).with_data_parallel(
              loss_name=loss.name)
          loss_data, = exe.run(compiled_prog,
                               feed={"X": x},
                               fetch_list=[loss.name])

    Args:
        place(fluid.CPUPlace|fluid.CUDAPlace(n)): the device on which the executor runs.

    """

    def __init__(self, place):
        self.place = place
        self.program_caches = dict()
        self.ctx_caches = dict()
        self.scope_caches = dict()
        self.var_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        self._default_executor = core.Executor(p)
        self._closed = False

    def _get_var_cache(self, program_cache_key):
        return self.var_caches.get(program_cache_key, None)

    def _get_scope_cache(self, program_cache_key):
        return self.scope_caches.get(program_cache_key, None)

    def _get_ctx_cache(self, program_cache_key):
        return self.ctx_caches.get(program_cache_key, None)

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_ctx_cache(self, ctx_cache_key, ctx):
        self.ctx_caches[ctx_cache_key] = ctx

    def _add_scope_cache(self, scope_cache_key, scope):
        self.scope_caches[scope_cache_key] = scope

    def _add_var_cache(self, var_cache_key, var):
        self.var_caches[var_cache_key] = var

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                global_block._prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program

    def _feed_data(self, program, feed, feed_var_name, scope):
        # feed var to framework
        for op in program.global_block().ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = _as_lodtensor(cur_feed, self.place)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                break

    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in six.moves.range(len(fetch_list))
        ]
        return outs

    '''
    TODO(typhoonzero): Define "no longer use" meaning? Can user create
    a new Executor for the same program and run?
    TODO(panyx0718): Why ParallelExecutor doesn't have close?
    '''

    def close(self):
        """
        Close this executor.

        You can no longer use this executor after calling this method.
        For distributed training, this method frees the resources
        on the PServers related to the current Trainer.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid

              cpu = fluid.CPUPlace()
              exe = fluid.Executor(cpu)
              # execute training or testing
              exe.close()
        """
        if not self._closed:
            self._default_executor.close()
            self._closed = True

    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy):
        exe = program._executor
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be split;
                    # splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            if len(feed) != len(program._places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, program._places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
        exe.run(fetch_var_names, fetch_var_name)
        arr = scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return as_numpy(arr)
        return [arr[i] for i in range(len(arr))]

    def _check_fetch_vars_persistable(self, program, fetch_list):
        for var in fetch_list:
            if isinstance(var, Variable):
                persistable = var.persistable
            else:
                block_num = program.desc.num_blocks()
                persistable = None
                var_name = cpt.to_bytes(var)
                for i in six.moves.range(block_num):
                    var_desc = program.desc.block(i).find_var(var_name)
                    if var_desc:
                        persistable = var_desc.persistable()
                        break
                assert persistable is not None, "Variable {} is not found".format(
                    var)

            if not persistable:
                logging.warning("""
     Detected that memory optimize or inplace is enabled, but some variables in the fetch
     list are not persistable. You may get a wrong fetched value, or an exception may be
     thrown because a variable in the fetch list cannot be found.

     TO FIX this:
         # Sample
         conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
         # if you need to fetch conv1, then:
         conv1.persistable = True

                 """)

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True,
            use_program_cache=False):
        """
        Run the program by this Executor. Feed data by the feed map, fetch the
        results by fetch_list. The Python executor takes a program, adds feed
        operators and fetch operators to this program according to the feed map
        and fetch_list. The feed map provides input data for the program. fetch_list provides
        the variables (or names) that the user wants to get after the program runs.

        Note: the executor will run all operators in the program, not
        only the operators that the fetch_list depends on.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              import numpy

              # First create the Executor.
              place = fluid.CPUPlace() # fluid.CUDAPlace(0)
              exe = fluid.Executor(place)

              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              adam = fluid.optimizer.Adam()
              adam.minimize(loss)

              # Run the startup program once and only once.
              exe.run(fluid.default_startup_program())

              x = numpy.random.random(size=(10, 1)).astype('float32')
              outs = exe.run(feed={'X': x},
                             fetch_list=[loss.name])

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
                if not provided, then default_main_program (not compiled) will be used.
            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
            fetch_list(list): a list of variables or variable names that the user
                wants to get; this method will return them according to this list.
            feed_var_name(str): the name for the input variable of
                the feed operator.
            fetch_var_name(str): the name for the output variable of
                the fetch operator.
            scope(Scope): the scope used to run this program, you can switch
                it to a different scope. Default is global_scope
            return_numpy(bool): whether to convert the fetched tensors to numpy.ndarray
            use_program_cache(bool): whether to use the cached program
                settings across batches. Setting it to True is faster
                only when (1) the program is not compiled with data parallel,
                and (2) the program, feed variable names, and fetch_list
                variable names have not changed compared to the last step.

        Returns:

            list(numpy.array): fetch result according to fetch_list.
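
        A minimal sketch of `use_program_cache` (illustrative only; it reuses
        `exe` and `loss` from the example above and assumes the feed names and
        fetch_list stay unchanged across steps):

        .. code-block:: python

              for _ in range(10):
                  x = numpy.random.random(size=(10, 1)).astype('float32')
                  outs = exe.run(feed={'X': x},
                                 fetch_list=[loss.name],
                                 use_program_cache=True)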
        """

        if self._closed:
            raise RuntimeError("Attempted to use a closed Executor")

        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []

        compiled = isinstance(program, compiler.CompiledProgram)
        # For backward compatibility, run directly.
        if not compiled:
            return self._run(
                program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)
        else:
            if fetch_list and program._is_data_parallel and program._program and (
                    program._build_strategy.memory_optimize or
                    program._build_strategy.enable_inplace):
                self._check_fetch_vars_persistable(program._program, fetch_list)

        program._compile(scope, self.place)
        if program._is_data_parallel:
            return self._run_parallel(
                program,
                scope=scope,
                feed=feed,
                fetch_list=fetch_list,
                fetch_var_name=fetch_var_name,
                return_numpy=return_numpy)
        elif program._is_inference:
            return self._run_inference(program._executor, feed)
        else:
            # TODO(panyx0718): Can compile program to optimize executor
            # performance.
            # TODO(panyx0718): executor should be able to run graph.
            assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
            # use_program_cache is not valid with CompiledProgram
            return self._run(
                program._program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=False)

    def _run(self, program, exe, feed, fetch_list, feed_var_name,
             fetch_var_name, scope, return_numpy, use_program_cache):

        if feed is None:
            feed = {}
        elif isinstance(feed, (list, tuple)):
            assert len(feed) == 1, "Not compiled with data parallel"
            feed = feed[0]

        if not isinstance(feed, dict):
            raise TypeError(
                "feed requires dict as its Parameter. But you passed in %s" %
                (type(feed)))
        if program is None:
            program = default_main_program()

        if not isinstance(program, Program):
            raise TypeError(
                "Executor requires Program as its Parameter. But you passed in %s"
                % (type(program)))

        if use_program_cache:
            cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
            cached_program = self._get_program_cache(cache_key)
            cached_ctx = self._get_ctx_cache(cache_key)
            cached_scope = self._get_scope_cache(cache_key)
            cached_var = self._get_var_cache(cache_key)
            if cached_program is None:
                cached_program = self._add_feed_fetch_ops(
                    program=program,
                    feed=feed,
                    fetch_list=fetch_list,
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
                fetch_list_str = list(map(_to_name_str, fetch_list))
                cached_ctx = self._default_executor.prepare_ctx_cache(
                    cached_program.desc, 0, fetch_list_str, False)
                cached_var = self._default_executor.create_variables(
                    cached_program.desc, scope, 0)
                # currently, we cache program, vars, sub_scope here
                # we suppose that in a life cycle of training, a user
                # will not create many programs. So, here the basic
                # rule of caching is to cache all unseen (program, var, scope)
                # when a user uses use_program_cache.
                cached_scope = scope.new_scope()
                self._add_ctx_cache(cache_key, cached_ctx)
                self._add_var_cache(cache_key, cached_var)
                self._add_scope_cache(cache_key, cached_scope)
            program = cached_program
            ctx = cached_ctx
            scope = cached_scope
            var = cached_var
        else:
            program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        if not use_program_cache:
            exe.run(program.desc, scope, 0, True, True, fetch_var_name)
        else:
            exe.run_cached_prepared_ctx(ctx, scope, False, False, False)
        outs = self._fetch_data(fetch_list, fetch_var_name, scope)
        if return_numpy:
            outs = as_numpy(outs)
        return outs

    def _run_inference(self, exe, feed):
        return exe.run(feed)

    def _dump_debug_info(self, program=None, trainer=None):
        with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
            fout.write(trainer._desc())
        if program._fleet_opt:
            with open("fleet_desc.prototxt", "w") as fout:
                fout.write(str(program._fleet_opt["fleet_desc"]))

    def _prepare_trainer(self,
                         program=None,
                         dataset=None,
                         scope=None,
                         thread=0,
                         debug=False,
                         fetch_list=None,
                         fetch_info=None,
                         print_period=100):
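        """Build and configure a trainer for the given program and dataset.

        Applies the thread count, debug flag, and fetch settings, and returns
        a (scope, trainer) pair.
        """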
        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []
        if fetch_info is None:
            fetch_info = []
        assert len(fetch_list) == len(fetch_info)
        compiled = isinstance(program, compiler.CompiledProgram)
        if not compiled:
            # TODO: Need a better way to distinguish and specify different execution mode
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(program._fleet_opt)
            trainer._set_program(program)
        else:
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program.program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(
                    program.program._fleet_opt)
            trainer._set_program(program.program)

        # The following thread_num-determined logic will be deprecated
        if thread <= 0:
            if dataset.thread_num <= 0:
                raise RuntimeError(
                    "You should set thread num first, either in Dataset"
                    "or in Executor.train_from_dataset")
            else:
                trainer._set_thread(dataset.thread_num)
        else:
            trainer._set_thread(thread)

        # Adjust the reader size for small file num
        if program._pipeline_opt:
            dataset.set_thread(thread *
                               program._pipeline_opt["concurrency_list"][0])
            file_size = len(dataset.dataset.get_filelist())
            if file_size < thread:
                thread = file_size
                print(
                    "Pipeline: setting the pipeline num to %d is enough because there are only %d files"
                    % (file_size, file_size))
            if file_size < thread * program._pipeline_opt["concurrency_list"][
                    0]:
                print(
                    "Pipeline: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
                    % (file_size // thread, file_size))
                program._pipeline_opt["concurrency_list"][
                    0] = file_size // thread
                dataset.set_thread(
                    program._pipeline_opt["concurrency_list"][0] * thread)
        trainer._set_debug(debug)
        trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
        return scope, trainer

    def infer_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100):
        """
        The behavior of infer_from_dataset is almost the same as that of
        train_from_dataset, except that in distributed training,
        pushing gradients is disabled in infer_from_dataset.
        infer_from_dataset() can easily be used for multi-threaded
        evaluation.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
               if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
               a user should provide a well-defined dataset before calling this function.
               Please check the documentation of Dataset if needed. Default is None
            scope(Scope): the scope used to run this program, you can switch it to a different scope
               for each run. Default is global_scope
            thread(int): number of threads a user wants to run in this function. The actual number
               of threads will be min(Dataset.thread_num, thread) if thread > 0. Default is 0
            debug(bool): whether a user wants to run infer_from_dataset in debug mode. Default is False
            fetch_list(Variable List): fetch variable list, each variable
                                       will be printed during training. Default is None
            fetch_info(String List): print information for each variable. Default is None
            print_period(int): the number of mini-batches for each print. Default is 100

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.fluid as fluid

                place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
                exe = fluid.Executor(place)
                x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
                y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
                dataset = fluid.DatasetFactory().create_dataset()
                dataset.set_use_var([x, y])
                dataset.set_thread(1)
                filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
                dataset.set_filelist(filelist)
                exe.run(fluid.default_startup_program())
                exe.infer_from_dataset(program=fluid.default_main_program(),
                                       dataset=dataset)

        """
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")

        scope, trainer = self._prepare_trainer(
            program=program,
            dataset=dataset,
            scope=scope,
            thread=thread,
            debug=debug,
            fetch_list=fetch_list,
            fetch_info=fetch_info,
            print_period=print_period)
        trainer._set_infer(True)
        trainer._gen_trainer_desc()
        dataset._prepare_to_run()
        self._dump_debug_info(program=program, trainer=trainer)
        self._default_executor.run_from_dataset(program.desc, scope,
                                                dataset.dataset,
                                                trainer._desc())
        return None

    def train_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100):
        """
        Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
        Given a program (either a plain program or a compiled program), train_from_dataset will
        consume all data samples in the dataset. The input scope can be given by users; by default,
        the scope is global_scope(). The total number of threads run in training is `thread`.
        The thread number used in training will be the minimum of thread_num in Dataset and
        the value of thread in this interface. Debug can be set so that the executor will display
        the run time of all operators and the throughput of the current training task.

        Note: train_from_dataset will destroy all resources created within the executor for each run.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
               if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
               a user should provide a well-defined dataset before calling this function.
               Please check the documentation of Dataset if needed.
            scope(Scope): the scope used to run this program, you can switch it to a different scope
               for each run. Default is global_scope
            thread(int): number of threads a user wants to run in this function. The actual number
               of threads will be min(Dataset.thread_num, thread)
            debug(bool): whether a user wants to run train_from_dataset in debug mode
            fetch_list(Variable List): fetch variable list, each variable
                                       will be printed during training
            fetch_info(String List): print information for each variable
            print_period(int): the number of mini-batches for each print

        Returns:
            None
        
        Examples:
        
            .. code-block:: python

              import paddle.fluid as fluid

              place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
              exe = fluid.Executor(place)
              x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
              y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
              dataset = fluid.DatasetFactory().create_dataset()
              dataset.set_use_var([x, y])
              dataset.set_thread(1)
              filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
              dataset.set_filelist(filelist)
              exe.run(fluid.default_startup_program())
              exe.train_from_dataset(program=fluid.default_main_program(),
                                     dataset=dataset)

        """
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")

        scope, trainer = self._prepare_trainer(
            program=program,
            dataset=dataset,
            scope=scope,
            thread=thread,
            debug=debug,
            fetch_list=fetch_list,
            fetch_info=fetch_info,
            print_period=print_period)
        trainer._gen_trainer_desc()
        dataset._prepare_to_run()
        self._dump_debug_info(program=program, trainer=trainer)
        self._default_executor.run_from_dataset(program.desc, scope,
                                                dataset.dataset,
                                                trainer._desc())
        return None