#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import logging
import os
import multiprocessing
import sys
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory

__all__ = ['Executor', 'global_scope', 'scope_guard']

g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig


def global_scope():
    """
    Get the global/default scope instance. Many APIs use
    :code:`global_scope` as their default scope value, e.g., :code:`Executor.run`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          numpy.array(fluid.global_scope().find_var("data").get_tensor())

    Returns:
        Scope: The global/default scope instance.
    """
    return g_scope


def _switch_scope(scope):
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex


@signature_safe_contextmanager
def scope_guard(scope):
    """
    Change the global/default scope instance by a Python `with` statement. All
    runtime variables will be assigned to the new scope.

    Args:
        scope: The new global/default scope.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                 fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(new_scope.find_var("data").get_tensor())
    """

    ex = _switch_scope(scope)
    yield
    _switch_scope(ex)


def as_numpy(tensor):
    """
    Convert a Tensor to a numpy.ndarray; it only supports Tensors without LoD information.
    For higher dimensional sequence data, please use LoDTensor directly.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          new_scope = fluid.Scope()
          with fluid.scope_guard(new_scope):
              fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          tensor = new_scope.find_var("data").get_tensor()
          fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())

    Args:
       tensor(Variable): an instance of Tensor

    Returns:
        numpy.ndarray
    """
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    if tensor._is_initialized():
        return np.array(tensor)
    else:
        return None


def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
144 145 146
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has feed operators
        that match the info contained in feed_targets and feed_holder_name.
    """

    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            break
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0


def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.

    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
    """

    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0


def _fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the
    given scope.
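
    Examples:
        .. code-block:: python

          # A minimal sketch: write a tensor into a scope, then fetch it back
          # by name through this internal helper.
          import paddle.fluid as fluid
          import numpy

          new_scope = fluid.Scope()
          with fluid.scope_guard(new_scope):
              fluid.global_scope().var("data").get_tensor().set(
                  numpy.ones((2, 2)), fluid.CPUPlace())
          data = fluid.executor._fetch_var("data", scope=new_scope)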

    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope that
            you pass to Executor.run() when running your program.
            If None, global_scope() will be used. Default None.
        return_numpy(bool): whether convert the tensor to numpy.ndarray.
            Default True.

    Returns:
       LoDTensor|numpy.ndarray
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor


def _to_name_str(var):
    if isinstance(var, Variable):
        return var.desc.name()
    elif isinstance(var, str):
        return var
    elif isinstance(var, six.string_types):
        return str(var)
    else:
        raise TypeError(str(var) + " should be Variable or str")


def _get_strong_program_cache_key(program, feed, fetch_list):
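    # The program's id is part of the key so that identical feed/fetch names
    # used with different programs do not collide in the executor caches.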
    return str(id(program)) + _get_program_cache_key(feed, fetch_list)


def _get_program_cache_key(feed, fetch_list):
    feed_var_names = list(feed.keys())
    fetch_var_names = list(map(_to_name_str, fetch_list))

    return str(feed_var_names + fetch_var_names)


def _as_lodtensor(data, place):
    """
        Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
        For higher dimensional sequence data, please use LoDTensor directly.

        Examples:
            >>> import paddle.fluid as fluid
            >>> place = fluid.CPUPlace()
            >>> exe = fluid.executor(place)
            >>> data = np.array(size=(100, 200, 300))
            >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
            >>>     ...

        Args:
            data(numpy.ndarray): a instance of array

        Returns:
            LoDTensor
        """
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    # single tensor case
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor


class Executor(object):
    """
    An Executor in Python, supporting single/multiple-GPU running as well as
    single/multiple-CPU running. The Python executor takes a program, adds
    feed operators and fetch operators to this program according to the feed
    map and fetch_list. The feed map provides input data for the program.
    fetch_list provides the variables (or names) that the user wants to get
    after the program runs. Note: the executor will run all operators in the
    program, not only the operators that the fetch_list depends on.
    It stores the global variables into the global scope and creates a local
    scope for the temporary variables. The contents of the local scope may be
    discarded after every minibatch forward/backward pass finishes, but the
    global scope variables persist across different runs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle.fluid.compiler as compiler
          import numpy
          import os

          use_cuda = True
          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
          exe = fluid.Executor(place)

          train_program = fluid.Program()
          startup_program = fluid.Program()
          with fluid.program_guard(train_program, startup_program):
              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

          # Run the startup program once and only once.
          # No need to optimize/compile the startup program.
          startup_program.random_seed=1
          exe.run(startup_program)

          # Run the main program directly without compile.
          x = numpy.random.random(size=(10, 1)).astype('float32')
          loss_data, = exe.run(train_program,
                               feed={"X": x},
                               fetch_list=[loss.name])

          # Or, compile the program and then run it. See `CompiledProgram`
          # for more detail.
          # NOTE: If you run the program on CPU, you need to
          # specify CPU_NUM; otherwise, fluid will use the number
          # of logical cores as CPU_NUM. In that case, the batch
          # size of the input should be greater than CPU_NUM;
          # if not, the process will fail with an exception.
          if not use_cuda:
              os.environ['CPU_NUM'] = str(2)

          compiled_prog = compiler.CompiledProgram(
              train_program).with_data_parallel(
              loss_name=loss.name)
          loss_data, = exe.run(compiled_prog,
                               feed={"X": x},
                               fetch_list=[loss.name])

    Args:
        place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicates on which device the executor runs.

    """

    def __init__(self, place):
        self.place = place
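        # Caches below are keyed by the program cache key and are only
        # populated when run() is called with use_program_cache=True.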
        self.program_caches = dict()
        self.ctx_caches = dict()
        self.scope_caches = dict()
        self.var_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        self._default_executor = core.Executor(p)
        self._closed = False

    def _get_var_cache(self, program_cache_key):
        return self.var_caches.get(program_cache_key, None)

    def _get_scope_cache(self, program_cache_key):
        return self.scope_caches.get(program_cache_key, None)

    def _get_ctx_cache(self, program_cache_key):
        return self.ctx_caches.get(program_cache_key, None)

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_ctx_cache(self, ctx_cache_key, ctx):
        self.ctx_caches[ctx_cache_key] = ctx

    def _add_scope_cache(self, scope_cache_key, scope):
        self.scope_caches[scope_cache_key] = scope

    def _add_var_cache(self, var_cache_key, var):
        self.var_caches[var_cache_key] = var

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
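        # Work on a clone so that feed/fetch operators are not added to the
        # caller's original program.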
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                global_block._prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch_operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program

    def _feed_data(self, program, feed, feed_var_name, scope):
        # feed var to framework
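        # Feed operators are prepended at the head of the global block, so the
        # scan can stop at the first non-feed operator.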
        for op in program.global_block().ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = _as_lodtensor(cur_feed, self.place)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                break

    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in six.moves.range(len(fetch_list))
        ]
        return outs

    '''
    TODO(typhoonzero): Define "no longer use" meaning? Can user create
    a new Executor for the same program and run?
    TODO(panyx0718): Why ParallelExecutor doesn't have close?
    '''

    def close(self):
        """
        Close this executor.

        You can no longer use this executor after calling this method.
        For distributed training, this method frees the resources on the
        PServers related to the current Trainer.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid

              cpu = fluid.CPUPlace()
              exe = fluid.Executor(cpu)
              # execute training or testing
              exe.close()
        """
        if not self._closed:
            self._default_executor.close()
            self._closed = True

    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy):
        exe = program._executor
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split across devices, and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            if len(feed) != len(program._places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, program._places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
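        # Run the compiled (parallel) executor; the fetched results are
        # gathered into a LoDTensorArray held by fetch_var_name in the scope.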
        exe.run(fetch_var_names, fetch_var_name)
        arr = scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return as_numpy(arr)
        return [arr[i] for i in range(len(arr))]

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True,
            use_program_cache=False):
        """
        Run the program by this Executor. Feed data by the feed map, fetch the
        results by fetch_list. The Python executor takes a program, adds feed
        operators and fetch operators to this program according to the feed map
        and fetch_list. The feed map provides input data for the program.
        fetch_list provides the variables (or names) that the user wants to get
        after the program runs.

        Note: the executor will run all operators in the program, not only
        the operators that the fetch_list depends on.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              import numpy

              # First create the Executor.
              place = fluid.CPUPlace() # fluid.CUDAPlace(0)
              exe = fluid.Executor(place)

              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
              hidden = fluid.layers.fc(input=data, size=10)
              loss = fluid.layers.mean(hidden)
              adam = fluid.optimizer.Adam()
              adam.minimize(loss)

              # Run the startup program once and only once.
              exe.run(fluid.default_startup_program())

              x = numpy.random.random(size=(10, 1)).astype('float32')
              outs = exe.run(feed={'X': x},
                             fetch_list=[loss.name])

        Args:
            program(Program|CompiledProgram): the program that need to run,
                if not provided, then default_main_program (not compiled) will be used.
            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
            fetch_list(list): a list of variables or variable names that the user
                wants to get; this method will return them according to this list.
            feed_var_name(str): the name for the input variable of 
                feed Operator.
            fetch_var_name(str): the name for the output variable of 
                fetch Operator.
            scope(Scope): the scope used to run this program, you can switch 
                it to different scope. default is global_scope
            return_numpy(bool): whether to convert the fetched tensors to numpy arrays
            use_program_cache(bool): whether to use the cached program
                settings across batches. Setting it to True is faster only
                when (1) the program is not compiled with data parallel, and
                (2) the program, feed variable names and fetch_list variable
                names have not changed since the last step.
                
        Returns:

            list(numpy.array): fetch result according to fetch_list.
        """
        try:
            return self._run_impl(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)
        except Exception as e:
            if not isinstance(e, core.EOFException):
                print("An exception was thrown!\n {}".format(str(e)))
            raise e

    def _run_impl(self, program, feed, fetch_list, feed_var_name,
                  fetch_var_name, scope, return_numpy, use_program_cache):

        if self._closed:
            raise RuntimeError("Attempted to use a closed Executor")

        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []

        compiled = isinstance(program, compiler.CompiledProgram)
        # For backward compatibility, run directly.
        if not compiled:
            return self._run_program(
                program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)

        program._compile(scope, self.place)
        if program._is_data_parallel:
            return self._run_parallel(
                program,
                scope=scope,
                feed=feed,
                fetch_list=fetch_list,
                fetch_var_name=fetch_var_name,
                return_numpy=return_numpy)
        elif program._is_inference:
            return self._run_inference(program._executor, feed)
        else:
            # TODO(panyx0718): Can compile program to optimize executor
            # performance.
            # TODO(panyx0718): executor should be able to run graph.
            assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
            # use_program_cache is not valid with CompiledProgram
            return self._run_program(
                program._program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
667
                use_program_cache=False)

    def _run_program(self, program, exe, feed, fetch_list, feed_var_name,
                     fetch_var_name, scope, return_numpy, use_program_cache):

        if feed is None:
            feed = {}
        elif isinstance(feed, (list, tuple)):
            assert len(feed) == 1, "Not compiled with data parallel"
            feed = feed[0]

        if not isinstance(feed, dict):
            raise TypeError(
                "feed requires dict as its Parameter. But you passed in %s" %
                (type(feed)))
        if program is None:
            program = default_main_program()

        if not isinstance(program, Program):
            raise TypeError(
                "Executor requires Program as its Parameter. But you passed in %s"
                % (type(program)))

        if use_program_cache:
            cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
            cached_program = self._get_program_cache(cache_key)
            cached_ctx = self._get_ctx_cache(cache_key)
            cached_scope = self._get_scope_cache(cache_key)
            cached_var = self._get_var_cache(cache_key)
            if cached_program is None:
                cached_program = self._add_feed_fetch_ops(
                    program=program,
                    feed=feed,
                    fetch_list=fetch_list,
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
                fetch_list_str = list(map(_to_name_str, fetch_list))
                cached_ctx = self._default_executor.prepare_ctx_cache(
                    cached_program.desc, 0, fetch_list_str, False)
                cached_var = self._default_executor.create_variables(
                    cached_program.desc, scope, 0)
                # Currently, we cache the program, vars and sub_scope here.
                # We assume that a user will not create many programs during
                # one training life cycle, so the basic caching rule is to
                # cache every unseen (program, var, scope) combination when
                # use_program_cache is enabled.
                cached_scope = scope.new_scope()
                self._add_ctx_cache(cache_key, cached_ctx)
                self._add_var_cache(cache_key, cached_var)
                self._add_scope_cache(cache_key, cached_scope)
            program = cached_program
            ctx = cached_ctx
            scope = cached_scope
            var = cached_var
        else:
            program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        if not use_program_cache:
            exe.run(program.desc, scope, 0, True, True, fetch_var_name)
        else:
            exe.run_cached_prepared_ctx(ctx, scope, False, False, False)
        outs = self._fetch_data(fetch_list, fetch_var_name, scope)
        if return_numpy:
            outs = as_numpy(outs)
        return outs

    def _run_inference(self, exe, feed):
        return exe.run(feed)

    def _dump_debug_info(self, program=None, trainer=None):
        with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
            fout.write(str(trainer))
        if program._fleet_opt:
            with open("fleet_desc.prototxt", "w") as fout:
                fout.write(str(program._fleet_opt["fleet_desc"]))

    def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
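        # Cap the pipeline number and the first-stage concurrency by the
        # number of files in the dataset, then size the dataset thread pool
        # accordingly.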
        filelist_length = len(dataset.dataset.get_filelist())
        if filelist_length < pipeline_num:
            pipeline_num = filelist_length
            print(
                "Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
                % (filelist_length, filelist_length))
        if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
            print(
                "Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
                % (filelist_length // pipeline_num, filelist_length))
            pipeline_opt["concurrency_list"][
                0] = filelist_length // pipeline_num
        dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
        return pipeline_num

    def _prepare_trainer(self,
                         program=None,
                         dataset=None,
                         scope=None,
                         thread=0,
                         debug=False,
                         fetch_list=None,
                         fetch_info=None,
                         print_period=100):
        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []
        if fetch_info is None:
            fetch_info = []
        assert len(fetch_list) == len(fetch_info)
        compiled = isinstance(program, compiler.CompiledProgram)
        if not compiled:
            # TODO: Need a better way to distinguish and specify different execution mode
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(program._fleet_opt)
            trainer._set_program(program)
        else:
            if program._pipeline_opt:
                trainer = TrainerFactory()._create_trainer(
                    program.program._pipeline_opt)
            else:
                trainer = TrainerFactory()._create_trainer(
                    program.program._fleet_opt)
            trainer._set_program(program.program)

        # The following thread_num-determined logic will be deprecated
        if thread <= 0:
            if dataset.thread_num <= 0:
                raise RuntimeError(
                    "You should set thread num first, either in Dataset"
                    "or in Executor.train_from_dataset")
            else:
                trainer._set_thread(dataset.thread_num)
        else:
            trainer._set_thread(thread)

        trainer._set_debug(debug)
        trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
        return scope, trainer

    def infer_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
820 821 822 823
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100):
        """
        The documentation of infer_from_dataset is almost the same as
        train_from_dataset, except that in distributed training,
        pushing gradients is disabled in infer_from_dataset.
        infer_from_dataset() can easily be used for multi-threaded
        evaluation.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
               if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
               a user should provide a well-defined dataset before calling this function.
               Please check the document of Dataset if needed. default is None
            scope(Scope): the scope used to run this program, you can switch it to different scope
               for each run. default is global_scope
            thread(int): number of threads a user wants to run in this function. The actual number
               of threads will be min(Dataset.thread_num, thread) if thread > 0, default is 0
            debug(bool): whether a user wants to run infer_from_dataset, default is False
            fetch_list(Variable List): fetch variable list, each variable
                                       will be printed during training, default is None
            fetch_info(String List): print information for each variable, default is None
            print_period(int): the number of mini-batches for each print, default is 100

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.fluid as fluid

                place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
                exe = fluid.Executor(place)
                x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
                y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
                dataset = fluid.DatasetFactory().create_dataset()
                dataset.set_use_var([x, y])
                dataset.set_thread(1)
                filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
                dataset.set_filelist(filelist)
                exe.run(fluid.default_startup_program())
                exe.infer_from_dataset(program=fluid.default_main_program(),
                                       dataset=dataset)        

        """
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")

        dataset._prepare_to_run()
        scope, trainer = self._prepare_trainer(
            program=program,
            dataset=dataset,
            scope=scope,
            thread=thread,
            debug=debug,
            fetch_list=fetch_list,
            fetch_info=fetch_info,
            print_period=print_period)
        trainer._set_infer(True)
        trainer._gen_trainer_desc()
        self._dump_debug_info(program=program, trainer=trainer)
        self._default_executor.run_from_dataset(program.desc, scope,
                                                dataset.dataset,
                                                trainer._desc())
        dataset._finish_to_run()
        return None

    def train_from_dataset(self,
                           program=None,
                           dataset=None,
                           scope=None,
                           thread=0,
                           debug=False,
                           fetch_list=None,
                           fetch_info=None,
                           print_period=100):
        """
        Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
        Given a program (either a plain program or a compiled program), train_from_dataset
        will consume all data samples in dataset. The input scope can be given by users;
        by default, scope is global_scope(). The total number of threads run in training
        is `thread`. The thread number used in training will be the minimum of thread_num
        in Dataset and the value of thread in this interface. If debug is set, the executor
        will display the run time of all operators and the throughput of the current
        training task.

        Note: train_from_dataset will destroy all resources created within the executor
        for each run.

        Args:
            program(Program|CompiledProgram): the program that needs to be run,
               if not provided, then default_main_program (not compiled) will be used.
            dataset(paddle.fluid.Dataset): dataset created outside this function,
               a user should provide a well-defined dataset before calling this function.
               Please check the document of Dataset if needed.
            scope(Scope): the scope used to run this program, you can switch it to different scope
               for each run. default is global_scope
            thread(int): number of threads a user wants to run in this function. The actual number
               of threads will be min(Dataset.thread_num, thread)
            debug(bool): whether a user wants to run train_from_dataset 
            fetch_list(Variable List): fetch variable list, each variable
                                       will be printed during training
            fetch_info(String List): print information for each variable
            print_period(int): the number of mini-batches for each print

        Returns:
            None
        
        Examples:
        
            .. code-block:: python

              import paddle.fluid as fluid

              place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
              exe = fluid.Executor(place)
              x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
              y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
              dataset = fluid.DatasetFactory().create_dataset()
              dataset.set_use_var([x, y])
              dataset.set_thread(1)
              filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
              dataset.set_filelist(filelist)
              exe.run(fluid.default_startup_program())
              exe.train_from_dataset(program=fluid.default_main_program(),
                                     dataset=dataset)

        """
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")

        if program._pipeline_opt:
            thread = self._adjust_pipeline_resource(program._pipeline_opt,
                                                    dataset, thread)

        dataset._prepare_to_run()
        scope, trainer = self._prepare_trainer(
            program=program,
            dataset=dataset,
            scope=scope,
            thread=thread,
            debug=debug,
            fetch_list=fetch_list,
            fetch_info=fetch_info,
            print_period=print_period)
        trainer._gen_trainer_desc()
        self._dump_debug_info(program=program, trainer=trainer)
        self._default_executor.run_from_dataset(program.desc, scope,
                                                dataset.dataset,
                                                trainer._desc())
        dataset._finish_to_run()
        return None