# test_complex_grad_accumulated.py
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle

import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard


class Optimization_ex1(paddle.nn.Layer):
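    """Toy layer whose real parameters theta0/theta1 are combined into a
    complex parameter; the loss is the squared magnitude |sum(A + theta)|^2,
    returned as a real scalar so it can be minimized."""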

    def __init__(self,
                 shape,
                 dtype,
                 param_attr=paddle.nn.initializer.Uniform(low=-5., high=5.)):
        super(Optimization_ex1, self).__init__()

        self.theta0 = self.create_parameter(shape=shape,
                                            attr=param_attr,
                                            dtype=dtype,
                                            is_bias=False)
        self.theta1 = self.create_parameter(shape=shape,
                                            attr=param_attr,
                                            dtype=dtype,
                                            is_bias=False)
        self.A = paddle.to_tensor(
            np.random.random((4, 4)).astype(dtype) +
            np.random.random((4, 4)).astype(dtype) * 1j)
        self.B = paddle.to_tensor(
            np.random.random((4, 4)).astype(dtype) +
            np.random.random((4, 4)).astype(dtype) * 1j,
            stop_gradient=False)

    def forward(self, mode=1):
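        # jj is the imaginary unit as a complex64 tensor; it is used to
        # assemble the complex parameter theta0 + theta1 * 1j in modes 1 and 2.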
        jj = paddle.to_tensor(np.array([1j]).astype(np.complex64))
        if mode == 1:
            # run the whole calculation in a single expression
            loss = paddle.sum(self.A + (self.theta0 + self.theta1 * jj)) * (
                paddle.sum(self.A + (self.theta0 + self.theta1 * jj)).conj())
            return loss.real()
        elif mode == 2:
            # run in two steps: build the complex parameter, then the loss
            self.theta = self.theta0 + self.theta1 * jj
            loss = paddle.sum(self.A + self.theta) * (
                paddle.sum(self.A + self.theta).conj())
            return loss.real()
        elif mode == 3:
            # run without parameters; the gradient flows through self.B only
            loss = paddle.sum(self.A + self.B) * (
                paddle.sum(self.A + self.B).conj())
            return loss.real()
        else:
            raise NotImplementedError


class TestComplexGradAccumulated(unittest.TestCase):
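    """Smoke test: complex-valued losses must run backward() and SGD updates
    for several iterations, both when gradients are cleared each step and when
    they are left to accumulate."""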

    def setUp(self):
        self.devices = ['cpu']
        if core.is_compiled_with_cuda():
            self.devices.append('gpu')
        self.iter = 3
        self.learning_rate = 0.5
        self.dtypes = ['float32', 'float64']
        self.theta_size = [4, 4]

    def train(self, device, dtype, mode):
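        # Baseline loop: backward, optimizer step, then clear gradients on
        # every iteration.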
        paddle.set_device(device)

        myLayer = Optimization_ex1(self.theta_size, dtype)
        optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate,
                                         parameters=myLayer.parameters())

        for iter in range(self.iter):
            loss = myLayer(mode)
            loss.backward()

            optimizer.step()
            optimizer.clear_grad()

    def train_no_clear_grad(self, device, dtype, mode):
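        # Same loop as train(), but clear_grad() is never called, so gradients
        # accumulate across iterations and the complex backward pass must
        # handle that accumulation.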
        paddle.set_device(device)

        myLayer = Optimization_ex1(self.theta_size, dtype)
        optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate,
                                         parameters=myLayer.parameters())

        for iter in range(self.iter):
            loss = myLayer(mode)
            loss.backward()

            optimizer.step()

    def test_case_one_step(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 1)
                self.train_no_clear_grad(dev, dtype, 1)

    def test_case_two_step(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 2)
                self.train_no_clear_grad(dev, dtype, 2)

    def test_case_non_param(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 3)
                self.train_no_clear_grad(dev, dtype, 3)

    def test_eager(self):
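        # Re-run all cases under the eager (dygraph) execution mode.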
        with _test_eager_guard():
            self.test_case_one_step()
            self.test_case_two_step()
            self.test_case_non_param()


if __name__ == '__main__':
    unittest.main()