# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
from paddle import _legacy_C_ops
from paddle.fluid import core
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.executor import (
    _is_dy2st_enable_standalone_executor,
    _is_enable_standalone_executor,
)
from paddle.fluid.framework import Variable, _in_legacy_dygraph
from paddle.fluid.layers.utils import _hash_with_id


def _append_backward_desc(main_program, outs):
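    # Clone the main program and append gradient ops for every Variable in
    # `outs` via paddle.fluid.backward.gradients, so the returned program
    # holds both the forward and the backward description.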
    # Make sure the is_test attribute of every op is False in train mode.
    program = main_program.clone()
    targets = []
    for out in outs:
        if isinstance(out, Variable):
            targets.append(program.global_block().var(out.name))

    if targets:
        paddle.fluid.backward.gradients(targets=targets, inputs=[])

    return program


# def _set_grad_type(params, train_program):
#     # NOTE: if user set sparse gradient mode, the param's gradient
#     # will be SelectedRows, not LoDTensor. But tracer will just
#     # set param grad VarBase by forward VarBase(LoDTensor)
#     # If we don't change grad_var type here, RunProgramOp need
#     # transform SelectedRows to LoDTensor forcibly, it may not
#     # be user wanted result.
#     for param in params:
#         grad_name = param.name + core.grad_var_suffix()
#         grad_var = train_program.desc.block(0).find_var(
#             grad_name.encode())
#         # NOTE: cannot find var desc maybe no problem, such as in batch_norm
#         if grad_var is None:
#             continue
#         param._set_grad_type(grad_var.type())


def _create_out(var):
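    # Build an uninitialized dygraph tensor that mirrors the static
    # Variable's dtype, shape, name and type; run_program writes the
    # forward result into it.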
    assert isinstance(var, Variable)
    var_desc = var.desc
    var_base = None
    if _in_legacy_dygraph():
        var_base = core.VarBase(
            var_desc.dtype(),
            var_desc.shape(),
            var_desc.name(),
            var_desc.type(),
            False,
        )
    else:
        var_base = core.eager.Tensor(
            var_desc.dtype(),
            var_desc.shape(),
            var_desc.name(),
            var_desc.type(),
            False,
        )
    return var_base


@switch_to_static_graph
def _add_build_strategy_for(input_program, start_op_index, end_op_index):
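    # Compile the ops in [start_op_index, end_op_index) of input_program
    # into a standalone program by round-tripping through CompiledProgram
    # and IrGraph.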
    compiled_program = paddle.static.CompiledProgram(
        core.Graph(input_program.desc, start_op_index, end_op_index),
        build_strategy=paddle.static.BuildStrategy(),
    )
    compiled_program._compile(
        core.Scope(), paddle.framework._current_expected_place()
    )
    ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph)
    built_program = ir_graph.to_program()
    return built_program


class TestRunProgram(unittest.TestCase):
    def test_eager(self):
        paddle.set_device('cpu')
        paddle.enable_static()
        # step 1: construct program
        x = paddle.static.data(shape=[2, 4], name='x')
        x.stop_gradient = False
        y = paddle.static.data(shape=[4, 2], name='y')
        y.stop_gradient = False
        out = paddle.matmul(x, y)

        main_program = paddle.static.default_main_program()
        program = _append_backward_desc(main_program, [out])
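        # Split the program that now holds both forward and backward ops into
        # a forward part and a backward part; the offset of 2 is meant to skip
        # the connecting ops that gradients() inserts between the two sections.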
        forward_program = _add_build_strategy_for(
            program, 0, main_program.desc.block(0).op_size()
        )
        backward_program = _add_build_strategy_for(
            program,
            main_program.desc.block(0).op_size() + 2,
            program.desc.block(0).op_size(),
        )

        paddle.disable_static('cpu')
        # step 2: call run_program in eager mode
        x_t = paddle.ones([2, 4])
        x_t.name = "x"
        x_t.stop_gradient = False
        y_t = paddle.ones([4, 2])
        y_t.name = "y"
        y_t.stop_gradient = False

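        # Dummy tensor used as a placeholder for op inputs/outputs that this
        # test does not exercise (there are no trainable parameters).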
        fake_var = paddle.zeros([1])
        fake_var.name = 'Fake_var'

        out_t = _create_out(out)

        scope = core.Scope()
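        # Attributes for run_program, passed as a flat list of alternating
        # name/value pairs.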
        attrs = [
            'global_block',
            program.desc.block(0),
            'start_op_index',
            0,
            'end_op_index',
            main_program.desc.block(0).op_size(),
            'is_test',
            False,
            'program_id',
            _hash_with_id(program),
        ]

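        # Pass the pre-split forward/backward blocks only when the standalone
        # executor (interpreter core) path is enabled; 'use_interpretorcore'
        # follows the attribute name registered by the op.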
        use_interpretorcore = (
            _is_enable_standalone_executor()
            and _is_dy2st_enable_standalone_executor()
        )
        attrs.extend(('use_interpretorcore', use_interpretorcore))
        if use_interpretorcore:
            attrs.extend(
                (
                    'forward_global_block',
                    forward_program.desc.block(0),
                    'backward_global_block',
                    backward_program.desc.block(0),
                )
            )

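        # Run the forward program through the legacy run_program op; the
        # placeholder tensor fills the parameter and double-grad slots that
        # this test does not use.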
        _legacy_C_ops.run_program(
            [x_t, y_t], [fake_var], [out_t], [scope], [fake_var], None, *attrs
        )

        loss = paddle.mean(out_t)
        loss.backward()

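        # x and y are all ones, so out = x @ y is a 2x2 matrix of 4s;
        # loss = mean(out) back-propagates 1/4 to each element of out,
        # giving gradients of 0.5 for both inputs.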
        np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
        np.testing.assert_array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy())
        np.testing.assert_array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy())


if __name__ == '__main__':
    unittest.main()