#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator


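# Fill a [100, 200] float64 tensor through the OpTest framework with
# force_cpu=False and check the output against the source values.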
class TestFillOp1(OpTest):
    def setUp(self):
        self.op_type = "fill"
        val = np.random.random(size=[100, 200])
        self.inputs = {}
        self.attrs = {
            'value': val.flatten().tolist(),
            'shape': [100, 200],
            'dtype': int(core.VarDesc.VarType.FP64),
            'force_cpu': False
        }
        self.outputs = {'Out': val.astype('float64')}

    def test_check_output(self):
        self.check_output()


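# Same as TestFillOp1, except force_cpu=True pins the filled tensor to CPU memory.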
class TestFillOp2(OpTest):
    def setUp(self):
        self.op_type = "fill"
        val = np.random.random(size=[100, 200])
        self.inputs = {}
        self.attrs = {
            'value': val.flatten().tolist(),
            'shape': [100, 200],
            'dtype': int(core.VarDesc.VarType.FP64),
            'force_cpu': True
        }
        self.outputs = {'Out': val.astype('float64')}

    def test_check_output(self):
        self.check_output()


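# Run the fill operator directly through Operator/Scope on CPU and, when
# available, CUDA places, then compare the filled tensor with the source array.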
class TestFillOp3(unittest.TestCase):
    def check_with_place(self, place, f_cpu):
        scope = core.Scope()
        # create Out Variable
        out = scope.var('Out').get_tensor()

        # create and run fill_op operator
        val = np.random.random(size=[300, 200])
        fill_op = Operator(
            "fill",
            value=val.flatten(),
            shape=[300, 200],
            dtype=int(core.VarDesc.VarType.FP32),
            force_cpu=f_cpu,
            Out='Out')
        fill_op.run(scope, place)

        # get result from Out
        result_array = np.array(out)
        full_array = np.array(val, 'float32')

        self.assertTrue(np.array_equal(result_array, full_array))

    def test_fill_op(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        for place in places:
            self.check_with_place(place, True)
            self.check_with_place(place, False)


if __name__ == '__main__':
    unittest.main()