# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle
import unittest
import six
import numpy as np

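# Use every CUDA device when compiled with CUDA; otherwise emulate multiple
# devices on the CPU through the CPU_NUM environment variable.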
dev_cnt = 2
if fluid.core.is_compiled_with_cuda():
    dev_cnt = fluid.core.get_cuda_device_count()
os.environ['CPU_NUM'] = str(dev_cnt)


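# Simple Python callbacks used to exercise py_func corner cases below.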
def dummy_func_with_no_input():
    return np.array([0], dtype='float32')


def dummy_func_with_no_output(x):
    pass


def dummy_func_with_multi_input_output(x, y):
    return np.array(x), np.array(y)


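# NumPy forward and backward implementations of tanh, plugged into the
# network below via fluid.layers.py_func.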
def tanh(x):
    return np.tanh(x)


def tanh_grad(y, dy):
    return np.array(dy) * (1 - np.square(np.array(y)))


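# NumPy forward and backward for cross-entropy. The inputs are softmax
# probabilities, and the backward returns None for the labels, which need
# no gradient.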
def cross_entropy(logits, labels):
    logits = np.array(logits)
    labels = np.array(labels)
    M = logits.shape[0]
    N = logits.shape[1]
    ret = np.ndarray([M, 1]).astype(logits.dtype)
    for idx in six.moves.range(M):
        ret[idx][0] = -np.log(logits[idx][labels[idx][0]])
    return ret


def cross_entropy_grad(logits, labels, bwd_dout):
    logits = np.array(logits)
    labels = np.array(labels)
    bwd_dout = np.array(bwd_dout)
    M = logits.shape[0]
    N = logits.shape[1]
    dlogits = np.zeros([M, N]).astype(logits.dtype)
    for idx in six.moves.range(M):
        dlogits[idx][labels[idx][0]] = \
            -bwd_dout[idx] / logits[idx][labels[idx][0]]
    return dlogits, None


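# A 4-layer fully-connected network. When use_py_func_op is True, tanh and
# cross-entropy are swapped for their fluid.layers.py_func counterparts so
# the two code paths can be compared.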
def simple_fc_net(img, label, use_py_func_op):
    hidden = img
    for idx in range(4):
        hidden = fluid.layers.fc(
            hidden,
            size=200,
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(
                value=1.0)))
        if not use_py_func_op:
            hidden = fluid.layers.tanh(hidden)
        else:
            new_hidden = fluid.default_main_program().current_block(
            ).create_var(name='hidden_{}'.format(idx),
                         dtype='float32',
                         shape=hidden.shape)
            hidden = fluid.layers.py_func(func=tanh,
                                          x=hidden,
                                          out=new_hidden,
                                          backward_func=tanh_grad,
                                          skip_vars_in_backward_input=hidden)

    prediction = fluid.layers.fc(hidden, size=10, act='softmax')
    if not use_py_func_op:
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
    else:
        loss = fluid.default_main_program().current_block().create_var(
            name='loss', dtype='float32', shape=[-1, 1])
        loss = fluid.layers.py_func(func=cross_entropy,
                                    x=[prediction, label],
                                    out=loss,
                                    backward_func=cross_entropy_grad,
                                    skip_vars_in_backward_input=loss)

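        # py_func with no input (x=None) and, just below, with no output.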
        dummy_var = fluid.default_main_program().current_block().create_var(
            name='test_tmp_var', dtype='float32', shape=[1])
        fluid.layers.py_func(func=dummy_func_with_no_input,
                             x=None,
                             out=dummy_var)
        loss += dummy_var
        fluid.layers.py_func(func=dummy_func_with_no_output, x=loss, out=None)

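        # py_func with multiple inputs and outputs, passed first as a tuple
        # and then as a list.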
        loss_out = fluid.default_main_program().current_block().create_var(
            dtype='float32', shape=[-1, 1])
        dummy_var_out = fluid.default_main_program().current_block().create_var(
            dtype='float32', shape=[1])
        fluid.layers.py_func(func=dummy_func_with_multi_input_output,
                             x=(loss, dummy_var),
                             out=(loss_out, dummy_var_out))
        assert loss == loss_out and dummy_var == dummy_var_out, \
            "py_func failed with multi input and output"

        fluid.layers.py_func(func=dummy_func_with_multi_input_output,
                             x=[loss, dummy_var],
                             out=[loss_out, dummy_var_out])
        assert loss == loss_out and dummy_var == dummy_var_out, \
            "py_func failed with multi input and output"

    loss = fluid.layers.mean(loss)
    return loss


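# Yields dev_cnt * 100 random samples: a 784-dim float feature vector and an
# integer label in [0, 9].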
def reader():
    for _ in six.moves.range(dev_cnt * 100):
        yield np.random.random([784]), np.random.random_integers(size=[1],
                                                                 low=0,
                                                                 high=9)


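# Builds and trains the network for two epochs, returning the per-batch
# losses, or None when CUDA is requested but not compiled in.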
def test_main(use_cuda, use_py_func_op, use_parallel_executor):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return None

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        with fluid.scope_guard(fluid.core.Scope()):
            paddle.seed(1)
            np.random.seed(1)
            img = fluid.layers.data(name='image', shape=[784], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            loss = simple_fc_net(img, label, use_py_func_op)
            optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
            optimizer.minimize(loss)

            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
            r = paddle.batch(reader, batch_size=10)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            train_cp = fluid.default_main_program()

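            # Parallel path: compile the program for data-parallel execution
            # and fetch the loss by name rather than by Variable.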
            if use_parallel_executor:
                train_cp = compiler.CompiledProgram(
                    fluid.default_main_program())
                train_cp = train_cp.with_data_parallel(loss_name=loss.name)
                fetch_list = [loss.name]
            else:
                fetch_list = [loss]

            ret = []
            for epoch_id in six.moves.range(2):
                for d in r():
                    L, = exe.run(train_cp,
                                 feed=feeder.feed(d),
                                 fetch_list=fetch_list)
                    ret.append(L)
            return np.array(ret)


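# Trains the same model with and without py_func ops; the resulting loss
# curves should agree to within 1e-3.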
class TestPyFuncOpUseExecutor(unittest.TestCase):

    def setUp(self):
        self.use_parallel_executor = False

    def test_loss_diff(self):
        for use_cuda in [True, False]:
            losses = []
            for use_py_func_op in [True, False]:
                L = test_main(use_cuda, use_py_func_op,
                              self.use_parallel_executor)
                if L is not None:
                    losses.append(L)

                # Losses from all runs so far (with and without py_func)
                # should be nearly identical, so compare each run against
                # the first one.
                for idx in six.moves.range(1, len(losses)):
                    max_diff = np.max(np.abs(losses[idx] - losses[0]))
                    self.assertAlmostEqual(max_diff, 0, delta=1e-3)


class TestPyFuncOpUseParallelExecutor(TestPyFuncOpUseExecutor):

    def setUp(self):
        self.use_parallel_executor = True


if __name__ == '__main__':
    unittest.main()