#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle

import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
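
# This test trains a real float32 parameter through a graph that contains a
# complex128 tensor, exercising complex-number autograd in dygraph mode.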


class Optimization_ex1(paddle.nn.Layer):

    def __init__(self,
                 shape,
                 param_attr=paddle.nn.initializer.Uniform(low=-5., high=5.),
                 dtype='float32'):
        super(Optimization_ex1, self).__init__()

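        # theta: trainable real-valued parameter, uniformly initialized in [-5, 5].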
        self.theta = self.create_parameter(shape=shape,
                                           attr=param_attr,
                                           dtype=dtype,
                                           is_bias=False)
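        # A: fixed complex128 matrix; real and imaginary parts are standard normal samples.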
        self.A = paddle.to_tensor(
            np.random.randn(4, 4) + np.random.randn(4, 4) * 1j)

    def forward(self):
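        # Adding the real parameter to the complex matrix yields a complex
        # tensor; its real part serves as the loss.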
        loss = paddle.add(self.theta, self.A)
        return loss.real()


class TestComplexSimpleNet(unittest.TestCase):

    def setUp(self):
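        # Always test on CPU; also test on GPU when this Paddle build has CUDA support.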
        self.devices = ['cpu']
        if core.is_compiled_with_cuda():
            self.devices.append('gpu')
        self.iter = 10
        self.learning_rate = 0.5
        self.theta_size = [4, 4]

    def train(self, device):
        paddle.set_device(device)

        myLayer = Optimization_ex1(self.theta_size)
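        # Adam optimizes the single real parameter theta.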
        optimizer = paddle.optimizer.Adam(learning_rate=self.learning_rate,
                                          parameters=myLayer.parameters())

        for itr in range(self.iter):
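            # Backprop through the complex graph fills theta.grad with real values.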
            loss = myLayer()
            loss.backward()

            optimizer.step()
            optimizer.clear_grad()

    def test_train_success(self):
        for dev in self.devices:
            self.train(dev)

    def test_eager(self):
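        # Repeat the same training run with eager dygraph mode enabled.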
        with _test_eager_guard():
            self.test_train_success()


if __name__ == '__main__':
    unittest.main()