From 801159ceeb20e05ca71582f640cba05ee2651ac5 Mon Sep 17 00:00:00 2001
From: hlygit66666 <32728786+hlygit66666@users.noreply.github.com>
Date: Wed, 26 Jan 2022 19:43:42 +0800
Subject: [PATCH] Add FuseBatchNormAddActPass and unittest. (#39178)

* add fuse_relu_depthwise_conv_pass unittest
* fix atol and rtol
* fix according to review
* add FuseBatchNormAddActPass and unittest
* Update test_dist_fuse_bn_add_act_pass.py
* solve conflict
---
 python/paddle/distributed/passes/cpp_pass.py  |  13 +++
 .../test_dist_fuse_bn_add_act_pass.py         | 100 ++++++++++++++++++
 2 files changed, 113 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py

diff --git a/python/paddle/distributed/passes/cpp_pass.py b/python/paddle/distributed/passes/cpp_pass.py
index 6b8ea30f3ba..97243b805cd 100644
--- a/python/paddle/distributed/passes/cpp_pass.py
+++ b/python/paddle/distributed/passes/cpp_pass.py
@@ -39,3 +39,16 @@ class FuseBatchNormActPass(CPPPassWrapper):
 
     def _type(self):
         return PassType.FUSION_OPT
+
+
+@register_pass("fuse_bn_add_act")
+class FuseBatchNormAddActPass(CPPPassWrapper):
+    def __init__(self):
+        super(FuseBatchNormAddActPass, self).__init__()
+
+    @property
+    def cpp_name(self):
+        return "fuse_bn_add_act_pass"
+
+    def _type(self):
+        return PassType.FUSION_OPT
diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py
new file mode 100644
index 00000000000..1b01260eaf2
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import paddle
+import paddle.distributed.fleet as fleet
+import numpy as np
+import paddle.nn as nn
+from paddle.distributed.passes import new_pass, PassManager
+import unittest
+from dist_pass_test_base import DistPassTestBase
+
+
+class BatchNormAddActNet(nn.Layer):
+    def __init__(self):
+        super(BatchNormAddActNet, self).__init__()
+
+        self.conv1 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
+        self.conv2 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
+        self.bn1 = nn.BatchNorm2D(8, data_format="NHWC")
+        self.bn2 = nn.BatchNorm2D(8, data_format="NHWC")
+        self.relu = nn.ReLU()
+
+    def forward(self, x):
+        y = self.conv1(x)
+        y = self.bn1(y)
+        out = self.conv2(x)
+        out = self.bn2(out) + y
+        out = self.relu(out)
+        out = paddle.flatten(out, 1)
+        return out
+
+
+class TestFuseBatchNormAddActPass(DistPassTestBase):
+    def init(self):
+        self.atol = 1e-4
+        self.rtol = 1e-4
+
+    def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]):
+        image = paddle.static.data(
+            shape=[batch_size] + image_shape, dtype='float32', name='image')
+
+        model = BatchNormAddActNet()
+        pred_out = model(image)
+        loss = paddle.mean(pred_out)
+        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
+
+        dist_strategy = fleet.DistributedStrategy()
+        dist_strategy.fuse_all_reduce_ops = False
+        dist_strategy.without_graph_optimization = True
+        dist_strategy.amp = True
+        dist_strategy.amp_configs = {
+            "init_loss_scaling": 32768,
+            "use_dynamic_loss_scaling": True,
+        }
+        fleet.init(is_collective=True, strategy=dist_strategy)
+        optimizer = fleet.distributed_optimizer(optimizer)
+        optimizer.minimize(loss)
+
+        rank = paddle.distributed.get_rank()
+
+        def reader():
+            seed = int(os.environ.get("SEED", 0))
+            np.random.seed(seed + rank)
+            for _ in range(10):
+                image_np = np.random.random(size=image.shape).astype('float32')
+                yield image_np,
+
+        main_program = paddle.static.default_main_program()
+        startup_program = paddle.static.default_startup_program()
+        return main_program, startup_program, [image], [loss], reader
+
+    def apply_passes(self, main_prog, startup_prog):
+        pass_manager = PassManager([new_pass("fuse_bn_add_act")])
+        pass_manager.apply([main_prog], [startup_prog])
+        print(pass_manager.names)
+
+        op_type = []
+        for op in main_prog.global_block().ops:
+            op_type.append(op.type)
+        self.assertTrue("fused_bn_add_activation" in op_type)
+        self.assertTrue("fused_bn_add_activation_grad" in op_type)
+
+    def test_fuse_bn_add_act(self):
+        self.check_main()
+
+
+if __name__ == "__main__":
+    unittest.main()
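
For reference, a minimal standalone sketch of applying the pass registered by this patch through the Python pass API, mirroring the apply_passes() step in the test above. The placeholder program built here (a data layer plus paddle.mean) is not part of this patch, and a CUDA build of PaddlePaddle is assumed, since the underlying C++ pass targets the cuDNN fused kernel.

import paddle
from paddle.distributed.passes import new_pass, PassManager

paddle.enable_static()

# A tiny placeholder static-graph program; it does NOT contain the
# batch_norm + add + activation pattern, so the pass is a no-op here.
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[None, 8], dtype="float32")
    loss = paddle.mean(x)

# Apply the newly registered "fuse_bn_add_act" pass, as the test's
# apply_passes() does.
pass_manager = PassManager([new_pass("fuse_bn_add_act")])
pass_manager.apply([main_prog], [startup_prog])
print(pass_manager.names)

# On a program that does contain batch_norm + elementwise_add + relu in NHWC
# on GPU (as built by BatchNormAddActNet above), the block ops afterwards
# include fused_bn_add_activation and fused_bn_add_activation_grad.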