#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import multiprocessing
import numpy as np
import contextlib
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt

__all__ = ['Executor', 'global_scope', 'scope_guard']

g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig


def global_scope():
    """
    Get the global/default scope instance. Many APIs use
    :code:`global_scope` as their default scope, e.g., :code:`Executor.run`.

    Returns:
        Scope: The global/default scope instance.
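
    Examples:
        A minimal usage sketch (the variable name below is illustrative; only
        persistable variables can be looked up this way):

        >>> import paddle.fluid as fluid
        >>> scope = fluid.global_scope()
        >>> var = scope.find_var("fc_0.w_0")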
    """
    return g_scope


def _switch_scope(scope):
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex


@contextlib.contextmanager
def scope_guard(scope):
    """
    Switch the global/default scope instance via a Python `with` statement. All
    variables created at runtime will be assigned to the new scope.

    Examples:
        >>> import paddle.fluid as fluid
        >>> new_scope = fluid.Scope()
        >>> with fluid.scope_guard(new_scope):
        >>>     ...

    Args:
        scope: The new global/default scope.
    """
    ex = _switch_scope(scope)
    yield
    _switch_scope(ex)


def as_numpy(tensor):
    """
    Convert a Tensor to a numpy.ndarray. Only Tensors without LoD information are supported.
    For higher dimensional sequence data, please use LoDTensor directly.
    Examples:
        >>> import paddle.fluid as fluid
        >>> outs = executor.run(...)
        >>> np_outs = map(lambda x: as_numpy(x), outs)
        >>>     ...

    Args:
       tensor(Variable): an instance of Tensor

    Returns:
        numpy.ndarray
    """
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    return np.array(tensor)


def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has feed operators
        that match the info contained in feed_targets and feed_holder_name.
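
    Examples:
        A hedged sketch (the program, feed data and names below are
        illustrative):

        >>> import paddle.fluid as fluid
        >>> block = fluid.default_main_program().global_block()
        >>> # True only after matching feed ops have been prepended to the
        >>> # block (e.g. by Executor._add_feed_fetch_ops); False otherwise.
        >>> has_feed_operators(block, {'image': image_tensor}, 'feed')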
    """

    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            break
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0


def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.

    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a list of Variables to be fetched (matched by name)
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
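
    Examples:
        A hedged sketch (assumes fetch ops for `loss` were already appended to
        the block, e.g. by Executor._add_feed_fetch_ops; names are
        illustrative):

        >>> block = augmented_program.global_block()
        >>> has_fetch_operators(block, [loss], 'fetch')  # returns True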
    """

    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0


def _fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the
    given scope.

    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope where
            you pass to Executor.run() when running your program.
            If None, global_scope() will be used. Default None.
        return_numpy(bool): whether convert the tensor to numpy.ndarray.
            Default True.

    Returns:
        LoDTensor|numpy.ndarray
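
    Examples:
        A hedged sketch (the parameter name below is illustrative and must
        refer to a persistable variable that already exists in the scope):

        >>> import paddle.fluid as fluid
        >>> w = fluid.executor._fetch_var("fc_0.w_0")
        >>> print(w.shape)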
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor


def _to_name_str(var):
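    # Normalize a fetch target to its variable name: accept a Variable or a
    # (byte/unicode) string; anything else raises TypeError.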
    if isinstance(var, Variable):
        return var.desc.name()
    elif isinstance(var, str):
        return var
    elif isinstance(var, six.string_types):
        return str(var)
    else:
        raise TypeError(str(var) + " should be Variable or str")


def _get_program_cache_key(feed, fetch_list):
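    # The cache key is simply the ordered list of feed and fetch variable names,
    # stringified; runs with the same feed/fetch signature share one cached
    # feed/fetch-augmented clone of the program.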
    feed_var_names = list(feed.keys())
    fetch_var_names = list(map(_to_name_str, fetch_list))

    return str(feed_var_names + fetch_var_names)


def _as_lodtensor(data, place):
    """
        Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
        For higher dimensional sequence data, please use LoDTensor directly.

        Examples:
            >>> import paddle.fluid as fluid
            >>> place = fluid.CPUPlace()
            >>> exe = fluid.executor(place)
            >>> data = np.array(size=(100, 200, 300))
            >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
            >>>     ...

        Args:
            data(numpy.ndarray): a instance of array

        Returns:
            LoDTensor
        """
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    # single tensor case
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor


class Executor(object):
    """
    An Executor in Python only supports single-GPU running. For multi-card running, please refer to
    ParallelExecutor.
    The Python executor takes a program and adds feed operators and fetch operators to this program according
    to the feed map and fetch_list. The feed map provides input data for the program. fetch_list provides
    the variables (or names) that the user wants to get after the program runs. Note: the executor will run all
    operators in the program, not only the operators that the fetch_list depends on.
    It stores the global variables into the global scope and creates a local scope for the temporary
    variables. The local scope contents are discarded after every minibatch forward/backward pass finishes,
    but the global scope variables persist across different runs.
    All ops in the program run in sequence.


    Example:
    .. code-block:: python
        # First create the Executor.
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)

        # Run the startup program once and only once.
        # No need to optimize/compile the startup program.
        exe.run(fluid.default_startup_program())

        # Run the main program directly without compile.
        loss, = exe.run(fluid.default_main_program(),
                        feed=feed_dict,
                        fetch_list=[loss.name])
        # Or, compile the program and then run it. See `CompiledProgram` for more detail.
        compiled_prog = compiler.CompiledProgram(
            fluid.default_main_program()).with_data_parallel(
            loss_name=loss.name)
        loss, = exe.run(compiled_prog,
                        feed=feed_dict,
                        fetch_list=[loss.name])

    Args:
        place(core.CPUPlace|core.CUDAPlace(n)): indicates which device the executor runs on

    Note: For debugging a complicated network on parallel GPUs, you can first test it on the executor.
    Both take exactly the same arguments and are expected to produce the same results.
    """

    def __init__(self, place):
        self.place = place
        self.program_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        self._default_executor = core.Executor(p)
        self._closed = False

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                global_block._prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program

    def _feed_data(self, program, feed, feed_var_name, scope):
        # feed var to framework
        for op in program.global_block().ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = _as_lodtensor(cur_feed, self.place)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                break

    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in six.moves.range(len(fetch_list))
        ]
        return outs

    def close(self):
        """
        Close this executor.

        You can no longer use this executor after calling this method.
        For distributed training, this method frees the resources on the PServers that are
        related to the current Trainer.
        TODO(typhoonzero): Define "no longer use" meaning? Can user create
        a new Executor for the same program and run?
        TODO(panyx0718): Why ParallelExecutor doesn't have close?

        Example:
            >>> cpu = core.CPUPlace()
            >>> exe = Executor(cpu)
            >>> ...
            >>> exe.close()
        """
        if not self._closed:
            self._default_executor.close()
            self._closed = True

    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy):
        exe = program._executor
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be split
                    # and splitting is fast on CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
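            # A list/tuple feed is interpreted as per-device data: one feed
            # dict for every place the compiled program runs on.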
            if len(feed) != len(program._places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, program._places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
        exe.run(fetch_var_names, fetch_var_name)
        arr = scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return as_numpy(arr)
        return [arr[i] for i in range(len(arr))]

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True,
            use_program_cache=False):
        """
        Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
        Python executor takes a program, adds feed operators and fetch operators to this program according
        to the feed map and fetch_list. The feed map provides input data for the program. fetch_list provides
        the variables (or names) that the user wants to get after the program runs.

        Note: the executor will run all operators in the program, not only the
        operators that the fetch_list depends on.

        Args:
            program(Program|CompiledProgram): the program that needs to run,
                if not provided, then default_main_program (not compiled) will be used.
            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
            fetch_list(list): a list of variables or variable names that the user wants to get; run() will return them in the order of this list.
            feed_var_name(str): the name for the input variable of the feed Operator.
            fetch_var_name(str): the name for the output variable of the fetch Operator.
            scope(Scope): the scope used to run this program; you can switch it to a different scope. Default is global_scope().
            return_numpy(bool): whether to convert the fetched tensors to numpy arrays.
            use_program_cache(bool): set to True if the program has not changed compared to the last step.

        Returns:

            list(numpy.array): fetch result according to fetch_list.


        Examples:

            >>> data = fluid.layers.data(name='X', shape=[1], dtype='float32')
            >>> out = fluid.layers.create_tensor(dtype='float32')
            >>> hidden = fluid.layers.fc(input=data, size=10)
            >>> fluid.layers.assign(hidden,out)
            >>> loss = fluid.layers.mean(out)
            >>> adam = fluid.optimizer.Adam()
            >>> adam.minimize(loss)

            >>> cpu = core.CPUPlace()
            >>> exe = fluid.Executor(cpu)
            >>> exe.run(fluid.default_startup_program())

            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
            >>> outs = exe.run(
            >>>     feed={'X': x},
            >>>     fetch_list=[loss.name])
        """

        if self._closed:
            raise RuntimeError("Attempted to use a closed Executor")

        if scope is None:
            scope = global_scope()
        if fetch_list is None:
            fetch_list = []

        compiled = isinstance(program, compiler.CompiledProgram)
        # For backward compatibility, run directly.
        if not compiled:
            return self._run(
                program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)

        program._compile(scope, self.place)
        if program._is_data_parallel:
            return self._run_parallel(
                program,
                scope=scope,
                feed=feed,
                fetch_list=fetch_list,
                fetch_var_name=fetch_var_name,
                return_numpy=return_numpy)
        elif program._is_inference:
            return self._run_inference(program._executor, feed)
        else:
            # TODO(panyx0718): Can compile program to optimize executor
            # performance.
            return self._run(
                program._program,
                self._default_executor,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
                use_program_cache=use_program_cache)

    def _run(self, program, exe, feed, fetch_list, feed_var_name,
             fetch_var_name, scope, return_numpy, use_program_cache):

        if feed is None:
            feed = {}
        if not isinstance(feed, dict):
            raise TypeError(
                "feed requires dict as its Parameter. But you passed in %s" %
                (type(feed)))
        if program is None:
            program = default_main_program()

        if not isinstance(program, Program):
            raise TypeError(
                "Executor requires Program as its Parameter. But you passed in %s"
                % (type(program)))

        cache_key = _get_program_cache_key(feed, fetch_list)
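        # The cache is keyed only by the feed/fetch variable names, so reusing it
        # is safe only when the caller guarantees the program itself is unchanged.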
        if use_program_cache:
            cached_program = self._get_program_cache(cache_key)
            if cached_program is None:
                cached_program = self._add_feed_fetch_ops(
                    program=program,
                    feed=feed,
                    fetch_list=fetch_list,
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
            program = cached_program
        else:
            self.program_caches.pop(cache_key, None)
            program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        exe.run(program.desc, scope, 0, True, True)
        outs = self._fetch_data(fetch_list, fetch_var_name, scope)
        if return_numpy:
            outs = as_numpy(outs)
        return outs

    def _run_inference(self, exe, feed):
        return exe.run(feed)