# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import numpy as np
import six

import paddle
from paddle.fluid import framework, backward, core
from paddle.fluid.dygraph import layers
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_MAGIC_NUM
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers.utils import pack_sequence_as
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.compiler import BuildStrategy
import paddle.compat as cpt
from paddle import _C_ops


class NestSequence(object):
    """
    A wrapper class that makes it easy to flatten and restore the nested
    structure of a given sequence.
    """

    def __init__(self, raw_input, need_check=False):
        self.__raw_input = raw_input
        self.__input_list = self.tolist()
        self.__var_ids = self._get_var_ids()
        self._check_non_variable(need_check)

    def tolist(self):
        """
        Flattens the nested sequence into a single list.
        """
        return flatten(self.__raw_input)

    def restore(self, value_list):
        """
        Restores the nested sequence from a flat value list.
        """
        assert len(self.__input_list) == len(value_list)
        return pack_sequence_as(self.__raw_input, value_list)

    def _get_var_ids(self):
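        # Record the indices of entries in the flattened input list that are
        # Variables/VarBases; only these positions carry tensor data.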
        var_ids = []
        for idx, var in enumerate(self.__input_list):
            if isinstance(var, (framework.Variable, core.VarBase)):
                var_ids.append(idx)

        return var_ids

    def _check_non_variable(self, need_check):
        """
        Raises a warning if the output of the traced function contains non-tensor values.
        """
        if need_check:
            warning_types = set()
            for var in self.__input_list:
                if not isinstance(var, (framework.Variable, core.VarBase)):
                    warning_types.add(type(var))
            if warning_types:
                logging_utils.warn(
                    "Output of traced function contains non-tensor type values: {}. "
                    "Currently, we don't support updating them while training and will return "
                    "the values observed at tracing time. Please try to return them as tensors.".
                    format(list(warning_types)))

    @property
    def var_ids(self):
        return self.__var_ids

    def __getitem__(self, item):
        return self.__input_list[item]


class LazyInitialized(object):
    """
    Descriptor that implements lazy initialization of a property.
    """

    def __init__(self, function):
        self.function = function

    def __get__(self, instance, cls):
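        # Compute the value on first access and cache it as an instance attribute
        # with the same name, so subsequent accesses bypass this descriptor.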
        val = self.function(instance)
        setattr(instance, self.function.__name__, val)
        return val


def _change_is_test_status(program, is_test):
    # change all `is_test` attributes
    for block in program.blocks:
        for op in block.ops:
            if op.has_attr('is_test'):
                op._set_attr('is_test', is_test)
    return program


class PartialProgramLayer:
    """
    PartialProgramLayer wraps all the ops from layers decorated by `@declarative`
    and executes them as a static subgraph.

    .. note::
        **1. This is a very low level API. Users should not use this API
             directly. Please use `partial_program_from(concrete_program)`
             to create it.
        **2. LoDTensorArray is not currently supported in the output.

    Args:
        main_program(Program): The main program that contains the ops to be executed.
        inputs(list[Variable]): The input list of the decorated function by `@declarative`.
        outputs(list[Variable]): The output list of the decorated function by `@declarative`.
        parameters(list[VarBase]|None): All trainable parameters included in the program. Default None.

    Returns:
        Layer: A Layer object that runs all ops internally in static mode.
    """

    def __init__(self, main_program, inputs, outputs, parameters=None,
                 **kwargs):
        super(PartialProgramLayer, self).__init__()
        self._inputs = NestSequence(inputs)
        self._outputs = NestSequence(outputs, need_check=True)
        self._params = parameters if parameters is not None else []

        self._build_strategy = kwargs.get('build_strategy', BuildStrategy())
        assert isinstance(self._build_strategy, BuildStrategy)

        self._origin_main_program = self._verify_program(main_program)
        self._tmp_scope_vec = self._create_scope_vec()
        # A fake_var to handle empty input or output
        self.__fake_vars = _create_fake_var()
        self._double_grads = self._get_double_grads(self._origin_main_program)
        # Set default mode to train
        self.training = True

    @LazyInitialized
    def _infer_program(self):
        """
        Lazy initialized property of infer_program.
        """
        return self._clone_for_test(self._origin_main_program)

    @LazyInitialized
    def _train_program(self):
        """
        Lazy initialized property of train_program.
        """
        train_program = self._append_backward_desc(self._origin_main_program)
        # NOTE: The grad type only needs to be set once, right after the train
        # program is initialized, so we do it here.
        self._set_grad_type(self._params, train_program)

        return train_program

    @LazyInitialized
    def _infer_program_id(self):
        return _hash_with_id(self._infer_program, self)

    @LazyInitialized
    def _train_program_id(self):
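        # Use a hash of the train program (and this layer) as the program id, and
        # register the build strategy with the cached executor under that id.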
        program_id = _hash_with_id(self._train_program, self)
        core._set_cached_executor_build_strategy(program_id,
                                                 self._build_strategy)

        return program_id

    def _verify_program(self, main_program):
        """
        Verify that the program parameter is initialized, prune some unused params,
        and remove redundant op callstack.
        """
        # 1. Check all params from main program can be found in self._params
        self._check_params_all_inited(main_program)
        # 2. Prune the parameters not used anywhere in the program.
        self._prune_unused_params(main_program)

        return main_program

    @switch_to_static_graph
    def _append_backward_desc(self, main_program):
        # Make sure all `is_test` attributes are False in train mode.
        program = _change_is_test_status(main_program.clone(), is_test=False)
        targets = []
        for out in self._outputs.tolist():
            if isinstance(out, framework.Variable):
                targets.append(program.global_block().var(out.name))

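        # Append backward (gradient) ops to the program for the collected targets.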
        if targets and self._params:
            backward.gradients(targets=targets, inputs=[])

        return program

    def _prune_unused_params(self, program):
        """
        Prune the parameters not used anywhere in the program.
        `@declarative` may decorate only a sub-function, which can leave some
        parameters created in `__init__` unused.
        So prune these parameters to avoid unnecessary operations in
        `run_program_op`.
        """
        required_params = []
        for param in self._params:
            found_param = False
            for block in program.blocks:
                for op in block.ops:
                    if param.name in op.input_arg_names or param.name in op.output_arg_names:
                        required_params.append(param)
                        found_param = True
                        break
                if found_param:
                    break

        self._params = required_params

    def _get_double_grads(self, program):
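        # Collect all gradient variables (names containing "@GRAD") across the
        # program's blocks and wrap each as a VarBase so they can be passed to
        # the run_program op.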
        double_grads = []
        for block in program.blocks:
            for name in block.vars:
                if "@GRAD" in name:
                    var_desc = block.vars[name].desc
                    var_base = core.VarBase(var_desc.dtype(),
                                            var_desc.shape(),
                                            var_desc.name(),
                                            var_desc.type(), False)
                    double_grads.append(var_base)
        return self._valid_vars(double_grads)

    def __call__(self, inputs):
        in_vars, out_vars = self._prepare(inputs)

        attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
                 0, 'end_op_index', self._infer_program.desc.block(0).op_size(),
                 'is_test', not self.training, 'program_id', self.program_id)
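        # Execute the whole sub-program in a single run_program op call; the
        # temporary scope vec holds the forward variables.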
        _C_ops.run_program(
            self._valid_vars(in_vars),
            self._valid_vars(self._params),
            self._valid_vars(out_vars), self._tmp_scope_vec, self._double_grads,
            *attrs)

        restored_nest_out = self._restore_out(out_vars)
        return self._remove_no_value(restored_nest_out)

    @property
    def program(self):
        return self._train_program if self.training else self._infer_program

    @property
    def program_id(self):
        return self._train_program_id if self.training else self._infer_program_id

    def _prepare(self, inputs):
        """
        Prepare inputs, outputs, attrs.
        """
        assert isinstance(inputs, (tuple, list))
        # Flatten inputs with nested structure into single list.
        flatten_inputs = flatten(inputs)
        # Convert variable into VarBase and feed in training data.
        input_vars = []
        expected_place = framework._current_expected_place()
        for i, value in enumerate(flatten_inputs):
            if isinstance(value, np.ndarray):
                var = core.VarBase(
                    value=value,
                    name=self._inputs[i].desc.name(),
                    persistable=False,
                    place=expected_place,
                    zero_copy=True)
            elif isinstance(value, core.VarBase):
                # NOTE(Aurelius84): If the var is on CPUPlace, it would be copied to
                # CUDAPlace multiple times when used as the input of multiple ops, so
                # we move it to the expected place in advance to avoid this problem.
                if value.stop_gradient and not value.place._equals(
                        expected_place):
                    var = value._copy_to(expected_place, False)
                    var.stop_gradient = True
                else:
                    var = value
                var.name = self._inputs[i].desc.name()
            else:
                continue
            input_vars.append(var)

        def create_out(var_id):
            var = self._outputs[var_id]
            assert isinstance(var, framework.Variable)
            var_desc = var.desc
            var_base = core.VarBase(var_desc.dtype(),
                                    var_desc.shape(),
                                    var_desc.name(), var_desc.type(), False)
            return var_base

        # Create VarBase to receive output data.
        out_vars = list(map(create_out, self._outputs.var_ids))

        return input_vars, out_vars

    def _create_scope_vec(self):
        # Hold forward variables
        tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                     "program_out_scope",
                                     core.VarDesc.VarType.STEP_SCOPES, True)

        inner_scope = core.Scope()
        tmp_scope_vec.value().set_scope(inner_scope)
        return tmp_scope_vec

    def _restore_out(self, out_vars):
        """
        Restores the same nested output structure, only replacing each Variable with a VarBase.
        """

        flatten_outputs = self._outputs.tolist()
        for i, idx in enumerate(self._outputs.var_ids):
            flatten_outputs[idx] = out_vars[i]
        outs = self._outputs.restore(flatten_outputs)
        if outs is not None and len(outs) == 1:
            outs = outs[0]

        return outs

    @switch_to_static_graph
    def _clone_for_test(self, main_program):
        return main_program.clone(for_test=True)

    def _is_no_value(self, var):
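        # A var counts as "no value" if it is a single-element tensor holding the
        # RETURN_NO_VALUE_MAGIC_NUM sentinel.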
        if isinstance(var, core.VarBase) and var.shape == [1]:
            # NOTE: .numpy() inserts a MemcpySync operation, which hurts performance.
            if var.numpy()[0] == RETURN_NO_VALUE_MAGIC_NUM:
                return True
        return False

    def _remove_no_value(self, out_vars):
        """
        Removes invalid values produced by variable-length return statements.
        """
        if isinstance(out_vars, core.VarBase):
            if self._is_no_value(out_vars):
                return None
            return out_vars
        elif isinstance(out_vars, (tuple, list)):
            if isinstance(out_vars, tuple):
                res = tuple(
                    var for var in out_vars if not self._is_no_value(var))
            else:
                # isinstance(out_vars, list)
                res = [var for var in out_vars if not self._is_no_value(var)]

            has_removed = (len(out_vars) > len(res))
            # len(out_vars) > len(res) means some vars were removed. This check
            # distinguishes removal from out_vars being empty or a single element
            # to begin with.
            if len(res) == 0 and has_removed:
                return None
            elif len(res) == 1 and has_removed:
                return res[0]
            return res

        return out_vars

    def _set_grad_type(self, params, train_program):
        # NOTE: If the user enables sparse gradient mode, the param's gradient
        # will be SelectedRows rather than LoDTensor, but the tracer just sets
        # the param's grad VarBase from the forward VarBase (LoDTensor).
        # If we don't change the grad_var type here, RunProgramOp would have to
        # forcibly transform SelectedRows to LoDTensor, which may not be the
        # result the user wants.
        for param in params:
            grad_name = param.name + core.grad_var_suffix()
            grad_var = train_program.desc.block(0).find_var(
                cpt.to_bytes(grad_name))
            # NOTE: Failing to find the var desc may be harmless, e.g. in batch_norm.
            if grad_var is None:
                continue
            param._set_grad_type(grad_var.type())

    def _remove_op_call_stack(self, main_program):
        """
        Remove the ops' Python call stacks, which carry redundant low-level error
        messages related to the transformations, to avoid confusing users.
        """
        assert isinstance(main_program, framework.Program)
        for block in main_program.blocks:
            for op in block.ops:
                if op.has_attr("op_callstack"):
                    op._remove_attr("op_callstack")

        return main_program

    def _check_params_all_inited(self, main_program):
        """
        Check that all params from the main program are already initialized. Details:
            1. all parameters in self._params should be of type `framework.ParamBase`, created in dygraph.
            2. all parameters from the transformed program can be found in self._params,
               because they share the same data with the ParamBase of the original dygraph.
        """
        if not isinstance(self._params, (list, tuple)):
            raise TypeError(
                "Type of self._params in PartialProgramLayer should be list or tuple, but received %s."
                % type(self._params))

        param_and_buffer_names_set = set()
        for i, var in enumerate(self._params):
            # self._params contains parameters and buffers with persistable=True.
            if not isinstance(var, core.VarBase):
                raise TypeError(
                    'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.
                    format(i, type(var)))
            param_and_buffer_names_set.add(var.name)

        for block in main_program.blocks:
            for name, var in six.iteritems(block.vars):
                if isinstance(var, framework.Parameter):
                    if name not in param_and_buffer_names_set:
                        raise ValueError(
                            "\n\tWe don't support defining a layer with parameters in the function "
                            "decorated by `@declarative`,\n\tbecause that would re-define the parameters "
                            "every time the function runs.\n\t"
                            "But we found parameter(%s) was created in the decorated function.\n\t"
                            "Please define the layer with parameters in the `__init__` function."
                            % name)

    def _valid_vars(self, vars):
        """
        Note: run_program_op.InferShape requires `X`/`Out` to be non-empty.
        Since empty inputs/outputs are common in dy2static, a fake VarBase is
        created to handle this case.
        """
        return vars if vars else self.__fake_vars


def _create_fake_var():
    """
    Create a fake var (forced onto CPU) to handle an empty input or output.
    """
    return [
        core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var",
                     core.VarDesc.VarType.RAW, False)
    ]


def partial_program_from(concrete_program):
    inputs = concrete_program.inputs
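    # Drop the leading Layer instance (the bound `self`) from the inputs when the
    # decorated function is a method of a Layer.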
    if inputs and isinstance(inputs[0], layers.Layer):
        inputs = inputs[1:]

    return PartialProgramLayer(
        concrete_program.main_program, inputs, concrete_program.outputs,
        concrete_program.parameters, **concrete_program.kwargs)