#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle

import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard


class Optimization_ex1(paddle.nn.Layer):
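    """Layer whose loss is built from complex intermediate results.

    theta0 and theta1 are real parameters combined as theta0 + theta1 * 1j,
    so calling backward() on the (real-valued) loss must propagate gradients
    through complex tensors.
    """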

    def __init__(self,
                 shape,
                 dtype,
                 param_attr=paddle.nn.initializer.Uniform(low=-5., high=5.)):
        super(Optimization_ex1, self).__init__()

        self.theta0 = self.create_parameter(shape=shape,
                                            attr=param_attr,
                                            dtype=dtype,
                                            is_bias=False)
        self.theta1 = self.create_parameter(shape=shape,
                                            attr=param_attr,
                                            dtype=dtype,
                                            is_bias=False)
        self.A = paddle.to_tensor(
            np.random.random((4, 4)).astype(dtype) +
            np.random.random((4, 4)).astype(dtype) * 1j)
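        # B is a complex tensor with stop_gradient=False, used by mode 3 to
        # exercise backward through a complex input that is not a Parameter.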
        self.B = paddle.to_tensor(np.random.random(
            (4, 4)).astype(dtype) + np.random.random((4, 4)).astype(dtype) * 1j,
                                  stop_gradient=False)

    def forward(self, mode=1):
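        # The imaginary unit as a complex64 tensor; modes 1 and 2 use it to
        # form theta = theta0 + theta1 * 1j, while mode 3 uses self.B instead.
        # Every mode returns the real part of sum(...) * conj(sum(...)).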
        jj = paddle.to_tensor(np.array([1j]).astype(np.complex64))
        if mode == 1:
            # run all calc in one step
            loss = paddle.sum(self.A + (self.theta0 + self.theta1 * jj)) * (
                paddle.sum(self.A + (self.theta0 + self.theta1 * jj)).conj())
            return loss.real()
        elif mode == 2:
            # run in two step
            self.theta = self.theta0 + self.theta1 * jj
            loss = paddle.sum(self.A + self.theta) * (
                paddle.sum(self.A + self.theta).conj())
            return loss.real()
        elif mode == 3:
            # run without param
            loss = paddle.sum(self.A + self.B) * (paddle.sum(self.A +
                                                             self.B).conj())
            return loss.real()
        else:
            raise NotImplementedError


class TestComplexGradAccumulated(unittest.TestCase):
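    """Gradient accumulation through complex intermediate results.

    Each case is trained twice: once clearing gradients every step and once
    letting them accumulate, on every available device and dtype.
    """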

    def setUp(self):
        self.devices = ['cpu']
        if core.is_compiled_with_cuda():
            self.devices.append('gpu')
        self.iter = 3
        self.learning_rate = 0.5
        self.dtypes = ['float32', 'float64']
        self.theta_size = [4, 4]

    def train(self, device, dtype, mode):
        paddle.set_device(device)

        myLayer = Optimization_ex1(self.theta_size, dtype)
        optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate,
                                         parameters=myLayer.parameters())

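        # Baseline loop: gradients are cleared after every optimizer step.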
        for iter in range(self.iter):
            loss = myLayer(mode)
            loss.backward()

            optimizer.step()
            optimizer.clear_grad()

    def train_no_clear_grad(self, device, dtype, mode):
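        # Same as train(), but clear_grad() is never called, so gradients
        # accumulate across the iterations.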
        paddle.set_device(device)

        myLayer = Optimization_ex1(self.theta_size, dtype)
        optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate,
                                         parameters=myLayer.parameters())

        for iter in range(self.iter):
            loss = myLayer(mode)
            loss.backward()

            optimizer.step()

    def test_case_one_step(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 1)
                self.train_no_clear_grad(dev, dtype, 1)

    def test_case_two_step(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 2)
                self.train_no_clear_grad(dev, dtype, 2)

    def test_case_non_param(self):
        for dev in self.devices:
            for dtype in self.dtypes:
                self.train(dev, dtype, 3)
                self.train_no_clear_grad(dev, dtype, 3)

    def test_eager(self):
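        # Repeat all cases under the eager mode guard.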
        with _test_eager_guard():
            self.test_case_one_step()
            self.test_case_two_step()
            self.test_case_non_param()


if __name__ == '__main__':
    unittest.main()