未验证 提交 43696bbe 编写于 作者: Z Zeng Jinle 提交者: GitHub

Move benchmark changing of cycle gan to models/develop (#2409)

* move benchmark changing of cycle gan to models/develop, test=develop

* add flag exist check, test=develop
上级 ee715d55
......@@ -4,10 +4,9 @@ import numpy as np
import os
# cudnn is not better when batch size is 1.
use_cudnn = False
if 'ce_mode' in os.environ:
use_cudnn = False
use_cudnn_conv2d_transpose = False
use_cudnn_conv2d = True
use_layer_norm = True
def cal_padding(img_size, stride, filter_size, dilation=1):
"""Calculate padding size."""
......@@ -21,7 +20,9 @@ def cal_padding(img_size, stride, filter_size, dilation=1):
def instance_norm(input, name=None):
# TODO(lvmengsi@baidu.com): Check the accuracy when using fluid.layers.layer_norm.
# return fluid.layers.layer_norm(input, begin_norm_axis=2)
if use_layer_norm:
return fluid.layers.layer_norm(input, begin_norm_axis=2)
helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
dtype = helper.input_dtype()
epsilon = 1e-5
......@@ -90,7 +91,7 @@ def conv2d(input,
name=name,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
use_cudnn=use_cudnn_conv2d,
param_attr=param_attr,
bias_attr=bias_attr)
if need_crop:
......@@ -145,7 +146,7 @@ def deconv2d(input,
filter_size=filter_size,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
use_cudnn=use_cudnn_conv2d_transpose,
param_attr=param_attr,
bias_attr=bias_attr)
......
......@@ -2,6 +2,28 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def set_paddle_flags(flags):
    """Export each entry of *flags* as an environment variable.

    A flag is only written when the variable is absent from the
    environment, so values already exported by the user (e.g. on the
    command line) always take precedence over these defaults.

    Args:
        flags (dict): mapping of environment-variable name to value;
            values are stringified before export.
    """
    for name, default in flags.items():
        # setdefault leaves any pre-existing value untouched.
        os.environ.setdefault(name, str(default))
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
set_paddle_flags({
    'FLAGS_cudnn_exhaustive_search': 1,
    'FLAGS_conv_workspace_size_limit': 256,
    'FLAGS_eager_delete_tensor_gb': 0,  # enable gc
    # You can omit the following settings, because the default
    # value of FLAGS_memory_fraction_of_eager_deletion is 1,
    # and default value of FLAGS_fast_eager_deletion_mode is 1
    'FLAGS_memory_fraction_of_eager_deletion': 1,
    'FLAGS_fast_eager_deletion_mode': 1
})
import random
import sys
import paddle
......@@ -150,18 +172,30 @@ def train(args):
build_strategy.enable_inplace = False
build_strategy.memory_optimize = False
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.use_experimental_executor = True
g_A_trainer_program = fluid.CompiledProgram(
g_A_trainer.program).with_data_parallel(
loss_name=g_A_trainer.g_loss_A.name, build_strategy=build_strategy)
loss_name=g_A_trainer.g_loss_A.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
g_B_trainer_program = fluid.CompiledProgram(
g_B_trainer.program).with_data_parallel(
loss_name=g_B_trainer.g_loss_B.name, build_strategy=build_strategy)
loss_name=g_B_trainer.g_loss_B.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
d_B_trainer_program = fluid.CompiledProgram(
d_B_trainer.program).with_data_parallel(
loss_name=d_B_trainer.d_loss_B.name, build_strategy=build_strategy)
loss_name=d_B_trainer.d_loss_B.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
d_A_trainer_program = fluid.CompiledProgram(
d_A_trainer.program).with_data_parallel(
loss_name=d_A_trainer.d_loss_A.name, build_strategy=build_strategy)
loss_name=d_A_trainer.d_loss_A.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
for epoch in range(args.epoch):
batch_id = 0
for i in range(max_images_num):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册