#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import unittest
import numpy as np

import paddle
from paddle import _legacy_C_ops
import paddle.fluid as fluid
from paddle.fluid import core, framework
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.framework import _in_eager_mode_
from paddle.fluid.executor import _is_enable_standalone_executor, _is_dy2st_enable_standalone_executor
from paddle.fluid.dygraph.base import switch_to_static_graph

paddle.enable_static()


@contextlib.contextmanager
def program_scope_guard():
    prog = fluid.Program()
    startup_prog = fluid.Program()
    scope = fluid.core.Scope()
    with fluid.scope_guard(scope):
        with fluid.program_guard(prog, startup_prog):
            with fluid.unique_name.guard():
                yield


@switch_to_static_graph
def _add_build_strategy_for(input_program, start_op_index, end_op_index):
    compiled_program = paddle.static.CompiledProgram(
        core.Graph(input_program.desc, start_op_index, end_op_index),
        build_strategy=paddle.static.BuildStrategy())
    compiled_program._compile(core.Scope(),
                              paddle.framework._current_expected_place())
    ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph)
    builded_program = ir_graph.to_program()
    return builded_program


@switch_to_static_graph
def _build_program_by_desc(program_desc):
    prog = framework.Program()
    prog.desc = program_desc
    prog.blocks = [
        framework.Block(prog, i) for i in range(prog.desc.num_blocks())
    ]
    prog._sync_with_cpp()
    return prog


# NOTE: Because RunProgramOp has a special output of type std::vector<Scope *>,
# OpTest cannot be used for RunProgramOp: the variable type cannot be specified
# when creating output variables in OpTest, and the default type is LoDTensor.
# NOTE: The gradient test method in OpTest also cannot be used for RunProgramOp,
# because the op holds a BlockDesc-type attribute that OperatorFactory cannot
# parse when creating the Operator, so gradients are compared against static
# graph results here.
# NOTE: Therefore a simple unittest framework is rewritten here for RunProgramOp.
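# Each test case roughly follows three steps: (1) build the model in static
# graph mode to get the program desc and forward op count, (2) run the same
# program via _legacy_C_ops.run_program under a dygraph guard, and (3) compare
# the outputs/gradients with those of a plain static-graph Executor run.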
class RunProgramOpTest(unittest.TestCase):

    def build_model(self):
        raise NotImplementedError(
            "RunProgramOp test should implement build_model")

    def check_output(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            # TODO: RunProgramOp is not recommended for use in static mode now
            self.expect_outs = self.run_static_model(place, is_test=True)
            self.check_output_with_place(place)

    def check_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            # TODO: RunProgramOp is not recommended for use in static mode now
            self.expect_grads = self.run_static_model(place, is_test=False)
            self.check_grad_with_place(place)

    def run_static_model(self, place, is_test=True):
        with program_scope_guard():
            startup_program = fluid.default_startup_program()
            main_program = fluid.default_main_program()

            self.build_model()

            exe = fluid.Executor(place)
            exe.run(startup_program)

            if is_test:
                fetch_list = self.output_names['Out']
            else:
                fetch_list = self.get_param_grad_names()

            outs = exe.run(main_program,
                           feed=self.inputs['X'],
                           fetch_list=fetch_list)
            return outs

    def get_program_desc(self):
        with program_scope_guard():
            fwd_op_num = self.build_model()
            return fluid.default_main_program().desc, fwd_op_num

    def get_forward_backward_program_desc(self, whole_program_desc,
                                          forward_op_num, output_num):
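        # Split the whole program: [0, forward_op_num) is the forward part; the
        # backward slice starts at forward_op_num + 2 * output_num, i.e. it skips
        # the ops appended between the forward and backward sections (two per output).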
        program = _build_program_by_desc(whole_program_desc)
        forward_program = _add_build_strategy_for(program, 0, forward_op_num)
        backward_program = _add_build_strategy_for(
            program, forward_op_num + 2 * output_num,
            program.desc.block(0).op_size())
        return forward_program.desc, backward_program.desc

    def prepare_attrs(self):
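        # The attrs form a flat (name, value) sequence that is passed positionally
        # to _legacy_C_ops.run_program below.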
        return [
            'global_block',
            self.program_desc.block(0), 'start_op_index', 0, 'end_op_index',
            self.fwd_op_num, 'program_id',
            _hash_with_id(self.program_desc, self)
        ]

    def get_param_grad_names(self):
        grad_names = []
        for var_name in self.inputs['Params']:
            grad_names.append(var_name + core.grad_var_suffix())
        return grad_names

    def check_output_with_place(self, place):
        # Step 1. run op
        actual_outs = self.calc_dygraph_output(place)

        # Step 2. compare output
        for expect_v, actual_v in zip(self.expect_outs, actual_outs):
            np.testing.assert_allclose(expect_v,
                                       actual_v.numpy(),
                                       rtol=1e-05,
                                       atol=1e-05)

    def check_grad_with_place(self, place):
        # Step 1. calc grads
        actual_grads = self.calc_dygraph_grad(place)

        # Step 2. compare grads
        for expect_v, actual_v in zip(self.expect_grads, actual_grads):
            np.testing.assert_array_almost_equal(expect_v, actual_v)
            np.testing.assert_allclose(expect_v,
                                       actual_v,
                                       rtol=1e-05,
                                       atol=1e-05)

    def prepare_dygraph_input(self, place, return_param_list=False):

        def create_var_base(is_input, name, np_value, stop_gradient):
            if _in_eager_mode_:
                var = core.eager.Tensor(value=np_value,
                                        name=name,
                                        place=place,
                                        zero_copy=True)
            else:
                var = core.VarBase(value=np_value,
                                   name=name,
                                   place=place,
                                   zero_copy=True)
            var.stop_gradient = stop_gradient
            return var

        # build inputs
        inputs = {}
        param_list = []
        inputs['X'] = []
        for name, np_value in self.inputs['X'].items():
            var = create_var_base(True, name, np_value, True)
            inputs['X'].append(var)
        inputs['Params'] = []
        for name, np_value in self.inputs['Params'].items():
            var = create_var_base(True, name, np_value, False)
            inputs['Params'].append(var)
            if return_param_list:
                param_list.append(var)

        if return_param_list:
            return inputs, param_list
        return inputs

    def prepare_dygraph_output(self):

        def create_var_base(is_input, name):
            var = framework._varbase_creator(dtype=None, shape=None, name=name)
            var.stop_gradient = False
            return var

        # build outputs
        outputs = {}
        outputs['Out'] = []
        for name in self.output_names['Out']:
            outputs['Out'].append(create_var_base(False, name))

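        # In eager mode the op receives the scope object directly; in the old
        # dygraph mode it is wrapped in a STEP_SCOPES VarBase.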
        if _in_eager_mode_:
            outputs['OutScope'] = [core.Scope()]
        else:
            outputs['OutScope'] = framework._varbase_creator(
                type=core.VarDesc.VarType.STEP_SCOPES,
                name="program_out_scope",
                persistable=True)
            inner_scope = core.Scope()
            outputs['OutScope'].value().set_scope(inner_scope)

        outputs['DOut'] = [create_var_base(False, "Fake_var")]
        return outputs

    def calc_dygraph_output(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()

        with fluid.dygraph.guard(place):
            inputs = self.prepare_dygraph_input(place)
            outputs = self.prepare_dygraph_output()

            forward_program_desc, backward_program_desc = self.get_forward_backward_program_desc(
                self.program_desc, self.fwd_op_num, len(outputs['Out']))

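            # When the standalone executor is enabled, run_program additionally
            # needs the forward/backward global blocks as attributes.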
            use_interpretorcore = _is_enable_standalone_executor(
            ) and _is_dy2st_enable_standalone_executor()
            self.attrs.extend(('use_interpretorcore', use_interpretorcore))
            if use_interpretorcore:
                self.attrs.extend(
                    ('forward_global_block', forward_program_desc.block(0),
                     'backward_global_block', backward_program_desc.block(0)))

            _legacy_C_ops.run_program(inputs['X'], inputs['Params'],
                                      outputs['Out'], outputs['OutScope'],
                                      outputs['DOut'], None, *self.attrs)

            return outputs['Out']

    def calc_dygraph_grad(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()

        with fluid.dygraph.guard(place):
            # Step 1. run forward
            inputs, input_param_list = self.prepare_dygraph_input(place, True)
            outputs = self.prepare_dygraph_output()

            forward_program_desc, backward_program_desc = self.get_forward_backward_program_desc(
                self.program_desc, self.fwd_op_num, len(outputs['Out']))

            use_interpretorcore = _is_enable_standalone_executor(
            ) and _is_dy2st_enable_standalone_executor()
            self.attrs.extend(('use_interpretorcore', use_interpretorcore))
            if use_interpretorcore:
                self.attrs.extend(
                    ('forward_global_block', forward_program_desc.block(0),
                     'backward_global_block', backward_program_desc.block(0)))

            _legacy_C_ops.run_program(inputs['X'], inputs['Params'],
                                      outputs['Out'], outputs['OutScope'],
                                      outputs['DOut'], None, *self.attrs)

            for param in input_param_list:
                var_type = self._get_grad_vartype(param.name)
                if var_type is None:
                    continue
                param._set_grad_type(var_type)

            # Step 2. run backward
            # NOTE: in unittest, only support single output now
            actual_outs = outputs['Out']
            assert len(actual_outs) == 1
            actual_outs[0].backward()

            # Step 3. prepare grads
            grads = []
            for param in input_param_list:
                grad = param.gradient()
                grads.append(grad)
            return grads

    def _get_grad_vartype(self, name):
        assert self.program_desc is not None
        grad_name = name + core.grad_var_suffix()
        for i in range(self.program_desc.num_blocks()):
            block = self.program_desc.block(i)
            var_desc = block.find_var_recursive(grad_name.encode())
            # Keep searching the remaining blocks if the grad var is not in this one.
            if var_desc is not None:
                return var_desc.type()
        return None


class TestRunProgramOpWithFC(RunProgramOpTest):

    def setUp(self):
        self.op_type = "run_program"
        self.dtype = np.float32
        self.input_names = {
            'X': ['img'],
            'Params': ['weight_param', 'bias_param']
        }
        self.output_names = {'Out': ['fc_0.tmp_2']}

        self.inputs = {
            'X': {
                self.input_names['X'][0]:
                np.random.random((32, 1, 28, 28)).astype(self.dtype)
            },
            'Params': {
                self.input_names['Params'][0]:
                np.random.random((784, 10)).astype(self.dtype),
                self.input_names['Params'][1]:
                np.random.random((32, 10)).astype(self.dtype)
            }
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad()

    def build_model(self):
        # 1. simple model
        img = fluid.data(name=self.input_names['X'][0],
                         shape=[None, 1, 28, 28],
                         dtype='float32')
        weight_attr = fluid.ParamAttr(
            name=self.input_names['Params'][0],
            learning_rate=0.5,
            initializer=fluid.initializer.NumpyArrayInitializer(
                self.inputs['Params'][self.input_names['Params'][0]]),
            trainable=True)
        bias_attr = fluid.ParamAttr(
            name=self.input_names['Params'][1],
            learning_rate=0.5,
            initializer=fluid.initializer.NumpyArrayInitializer(
                self.inputs['Params'][self.input_names['Params'][1]]),
            trainable=True)
        pred = fluid.layers.fc(input=img,
                               size=10,
                               param_attr=weight_attr,
                               bias_attr=bias_attr,
                               act='relu')
        # 2. get forward op num
        fwd_op_num = fluid.default_main_program().global_block().desc.op_size()
        # 3. append backward
        grads = fluid.backward.gradients(targets=[pred], inputs=[img])

        return fwd_op_num


class TestRunProgramOpWithEmbedding(RunProgramOpTest):

    def setUp(self):
        self.op_type = "run_program"
        self.dtype = np.float32
        self.input_names = {'X': ['x'], 'Params': ['emb_weight']}
        self.output_names = {'Out': ['reduce_sum_0.tmp_0']}

        self.inputs = {
            'X': {
                'x': np.array([[1, 3, 0, 4, 7]]).astype("int64")
            },
            'Params': {
                'emb_weight': np.random.random(size=(10, 16)).astype("float32")
            }
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # NOTE: fetch does not support SelectedRows, so sparse gradients cannot
        # be compared with static graph mode; only run the dygraph path here.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            # TODO: RunProgramOp is not recommended for use in static mode now
            self.calc_dygraph_grad(place)

    def build_model(self):
        # 1. simple model
        x = fluid.layers.data(name=self.input_names['X'][0],
                              shape=[5],
                              dtype='int64')
        emb = fluid.input.embedding(
            input=x,
            size=[10, 16],
            param_attr=fluid.ParamAttr(
                name="emb_weight",
                learning_rate=10,
                initializer=fluid.initializer.NumpyArrayInitializer(
                    self.inputs['Params'][self.input_names['Params'][0]])),
            is_sparse=True)
        y = fluid.layers.reduce_sum(emb, dim=-1)
        # 2. get forward op num
        fwd_op_num = fluid.default_main_program().global_block().desc.op_size()
        # 3. append backward
        grads = fluid.backward.gradients(targets=[y], inputs=[x])

        return fwd_op_num


class Net(paddle.nn.Layer):

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = paddle.nn.Linear(10, 10)
        self.fc2 = paddle.nn.Linear(10, 1)

    def forward(self, x):
        out = self.fc1(x)
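        # Stopping the gradient here means fc1's parameters receive no gradient;
        # the test below checks that to_static handles this the same as dygraph.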
        out.stop_gradient = True
        out = self.fc2(out)
        return out


class TestParametersWithStopGradient(unittest.TestCase):

    def setUp(self):
        self.seed = 2021
        self.iter = 5

    def train(self, to_static):
        # prepare env
        paddle.seed(self.seed)

        net = Net()
        if to_static:
            net = paddle.jit.to_static(net)
        sgd = paddle.optimizer.SGD(0.01, parameters=net.parameters())

        for i in range(self.iter):
            x = paddle.rand([4, 10])
            out = net(x)
            loss = paddle.mean(out)

            loss.backward()
            sgd.minimize(loss)
            net.clear_gradients()

        return loss

    def test_stop_gradient(self):
        paddle.disable_static()

        dy_loss = self.train(to_static=False)
        st_loss = self.train(to_static=True)
        self.assertEqual(dy_loss[0], st_loss[0])

        paddle.enable_static()


if __name__ == "__main__":
    unittest.main()