#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import os
import tempfile
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard
import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2


class TestProfiler(unittest.TestCase):
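    """End-to-end checks for paddle.fluid.profiler: build a small network,
    profile several training iterations, and validate the serialized
    profiler_pb2.Profile dump."""
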
    @classmethod
    def setUpClass(cls):
        os.environ['CPU_NUM'] = str(4)

    def net_profiler(self,
                     state,
                     option,
                     iter_range=None,
                     use_parallel_executor=False):
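        # Truncate any existing profile dump so this run starts from an
        # empty file.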
        profile_path = os.path.join(tempfile.gettempdir(), "profile")
        open(profile_path, "w").write("")
        startup_program = fluid.Program()
        main_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            image = fluid.layers.data(name='x', shape=[784], dtype='float32')
            hidden1 = fluid.layers.fc(input=image, size=64, act='relu')
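            # Exercise control-flow ops: run an fc layer inside a While loop
            # for 10 iterations, writing the result into a tensor array.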
            i = layers.zeros(shape=[1], dtype='int64')
            counter = fluid.layers.zeros(
                shape=[1], dtype='int64', force_cpu=True)
            until = layers.fill_constant([1], dtype='int64', value=10)
            data_arr = layers.array_write(hidden1, i)
            cond = fluid.layers.less_than(x=counter, y=until)
            while_op = fluid.layers.While(cond=cond)
            with while_op.block():
                hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
                layers.array_write(hidden_n, i, data_arr)
                fluid.layers.increment(x=counter, value=1, in_place=True)
                layers.less_than(x=counter, y=until, cond=cond)

            hidden_n = layers.array_read(data_arr, i)
            hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
            predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
            label = fluid.layers.data(name='y', shape=[1], dtype='int64')
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = fluid.layers.mean(cost)
            batch_size = fluid.layers.create_tensor(dtype='int64')
            batch_acc = fluid.layers.accuracy(
                input=predict, label=label, total=batch_size)

        optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
        opts = optimizer.minimize(avg_cost, startup_program=startup_program)

        place = fluid.CPUPlace() if state == 'CPU' else fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(startup_program)
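        # Optionally run the same program through ParallelExecutor so the
        # parallel execution path is profiled as well.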
        if use_parallel_executor:
            pe = fluid.ParallelExecutor(
                state != 'CPU',
                loss_name=avg_cost.name,
                main_program=main_program)

        pass_acc_calculator = fluid.average.WeightedAverage()
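        # Profile 10 mini-batches of random data; the profiler is reset after
        # the first two iterations so their warm-up events are discarded.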
        with profiler.profiler(state, 'total', profile_path, option) as prof:
            for iter in range(10):
                if iter == 2:
                    profiler.reset_profiler()
                x = np.random.random((32, 784)).astype("float32")
                y = np.random.randint(0, 10, (32, 1)).astype("int64")

                if use_parallel_executor:
                    pe.run(feed={'x': x, 'y': y}, fetch_list=[avg_cost.name])
                    continue
                outs = exe.run(main_program,
                               feed={'x': x,
                                     'y': y},
                               fetch_list=[avg_cost, batch_acc, batch_size])
                acc = np.array(outs[1])
                b_size = np.array(outs[2])
                pass_acc_calculator.add(value=acc, weight=b_size)
                pass_acc = pass_acc_calculator.eval()
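        # The profiler writes its result as a serialized profiler_pb2.Profile
        # message; parse it back and sanity-check the recorded events.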
        data = open(profile_path, 'rb').read()
        if (len(data) > 0):
            profile_pb = profiler_pb2.Profile()
            profile_pb.ParseFromString(data)
            self.assertGreater(len(profile_pb.events), 0)
            for event in profile_pb.events:
                if event.type == profiler_pb2.Event.GPUKernel:
                    if not event.detail_info and not event.name.startswith(
                            "MEM"):
                        raise Exception(
                            "Kernel %s missing event. Has this kernel been recorded by RecordEvent?"
                            % event.name)
                elif event.type == profiler_pb2.Event.CPU and (
                        event.name.startswith("Driver API") or
                        event.name.startswith("Runtime API")):
                    print("Warning: unregister", event.name)

    def test_cpu_profiler(self):
        self.net_profiler('CPU', "Default")
        self.net_profiler('CPU', "Default", use_parallel_executor=True)

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "profiler is enabled only with GPU")
    def test_cuda_profiler(self):
        self.net_profiler('GPU', "OpDetail")
        self.net_profiler('GPU', "OpDetail", use_parallel_executor=True)

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "profiler is enabled only with GPU")
    def test_all_profiler(self):
        self.net_profiler('All', "AllOpDetail")
        self.net_profiler('All', "AllOpDetail", use_parallel_executor=True)


if __name__ == '__main__':
    unittest.main()