Commit bb71d1a4 authored by LielinJiang

refine code

Parent 79570901
@@ -8,6 +8,7 @@ import time
 import glob
 import numpy as np
 from imageio import imread, imsave
+from tqdm import tqdm
 import cv2
 import paddle.fluid as fluid

@@ -175,8 +176,7 @@ class VideoFrameInterp(object):
         if not os.path.exists(os.path.join(frame_path_combined, vidname)):
             os.makedirs(os.path.join(frame_path_combined, vidname))
-        for i in range(frame_num - 1):
-            print(frames[i])
+        for i in tqdm(range(frame_num - 1)):
             first = frames[i]
             second = frames[i + 1]

@@ -208,12 +208,10 @@ class VideoFrameInterp(object):
             assert (X0.shape[1] == X1.shape[1])
             assert (X0.shape[2] == X1.shape[2])
-            print("size before padding ", X0.shape)
             X0 = np.pad(X0, ((0,0), (padding_top, padding_bottom), \
                 (padding_left, padding_right)), mode='edge')
             X1 = np.pad(X1, ((0,0), (padding_top, padding_bottom), \
                 (padding_left, padding_right)), mode='edge')
-            print("size after padding ", X0.shape)
             X0 = np.expand_dims(X0, axis=0)
             X1 = np.expand_dims(X1, axis=0)

@@ -233,8 +231,6 @@ class VideoFrameInterp(object):
             proc_timer.update(time.time() - proc_end)
             tot_timer.update(time.time() - end)
             end = time.time()
-            print("*********** current image process time \t " +
-                  str(time.time() - proc_end) + "s *********")
             y_ = [
                 np.transpose(
...
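The changes above swap the per-frame print for a tqdm progress bar and drop the padding-size prints; the edge-mode padding and batching themselves are untouched. Below is a minimal standalone sketch of that loop pattern, with placeholder frame names, image sizes, and padding amounts (none of these values come from the repository):

import numpy as np
from tqdm import tqdm

frames = [f"{i:08d}.png" for i in range(5)]         # placeholder frame list
padding_top, padding_bottom = 2, 2                  # placeholder padding sizes
padding_left, padding_right = 4, 4

for i in tqdm(range(len(frames) - 1)):              # progress bar replaces per-frame prints
    first, second = frames[i], frames[i + 1]        # consecutive pair to interpolate between
    X0 = np.zeros((3, 180, 320), dtype=np.float32)  # stand-in for the decoded CHW frames
    X1 = np.zeros((3, 180, 320), dtype=np.float32)
    # replicate border pixels so the padded region introduces no hard edges
    X0 = np.pad(X0, ((0, 0), (padding_top, padding_bottom),
                     (padding_left, padding_right)), mode='edge')
    X1 = np.pad(X1, ((0, 0), (padding_top, padding_bottom),
                     (padding_left, padding_right)), mode='edge')
    X0 = np.expand_dims(X0, axis=0)                 # add the batch dimension: (1, C, H, W)
    X1 = np.expand_dims(X1, axis=0)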
@@ -22,7 +22,7 @@ class AverageMeter(object):
 def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     vid_name = vid_path.split('/')[-1].split('.')[0]
     out_full_path = os.path.join(outpath, vid_name)

@@ -66,7 +66,7 @@ def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
 def frames_to_video_ffmpeg(framepath, videopath, r):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     cmd = ffmpeg + [
         ' -r ', r, ' -f ', ' image2 ', ' -i ', framepath, ' -vcodec ',
         ' libx264 ', ' -pix_fmt ', ' yuv420p ', ' -crf ', ' 16 ', videopath

@@ -99,7 +99,8 @@ def combine_frames(input, interpolated, combined, num_frames):
             for k in range(num_frames):
                 src = frames2[i * num_frames + k]
                 dst = os.path.join(
-                    combined, '{:08d}.png'.format(i * (num_frames + 1) + k + 1))
+                    combined,
+                    '{:08d}.png'.format(i * (num_frames + 1) + k + 1))
                 shutil.copy2(src, dst)
     except Exception as e:
         print(e)
...
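Every ffmpeg token list in this commit gains -y, a standard ffmpeg flag that overwrites an existing output file instead of stopping at an interactive prompt, which matters when the same video is processed more than once. A minimal sketch of how such a token list can be assembled and run follows; joining the tokens and calling the shell is an illustrative assumption, not necessarily how this repository executes the command:

import subprocess

framepath = 'output/frames/%08d.png'   # placeholder frame pattern
videopath = 'output/result.mp4'        # placeholder output path
r = '25'                               # placeholder frame rate

ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']    # -y: overwrite output without prompting
cmd = ffmpeg + [
    ' -r ', r, ' -f ', ' image2 ', ' -i ', framepath, ' -vcodec ',
    ' libx264 ', ' -pix_fmt ', ' yuv420p ', ' -crf ', ' 16 ', videopath
]
subprocess.call(''.join(cmd), shell=True)            # assumption: tokens are joined and run in a shell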
@@ -147,10 +147,8 @@ def custom_conv_layer(ni: int,
         stride=stride,
         padding=padding)
     if norm_type == 'Weight':
-        print('use weight norm')
         conv = nn.utils.weight_norm(conv)
     elif norm_type == 'Spectral':
-        # pass
         conv = Spectralnorm(conv)
     layers = [conv]
     if use_activ:
...
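Apart from dropping a debug print and a stale comment, custom_conv_layer keeps its behaviour: the freshly built conv is wrapped in weight normalization or spectral normalization depending on norm_type before being collected into the layer list. Here is a small sketch of that selection pattern; it uses paddle.nn.utils.spectral_norm in place of the repository's Spectralnorm wrapper so the example is self-contained, and the Conv2D arguments are placeholders:

import paddle.nn as nn

def wrap_norm(conv: nn.Conv2D, norm_type: str) -> nn.Layer:
    # pick a weight parameterization for the conv layer
    if norm_type == 'Weight':
        conv = nn.utils.weight_norm(conv)      # reparameterize weights as magnitude * direction
    elif norm_type == 'Spectral':
        conv = nn.utils.spectral_norm(conv)    # stand-in for the repository's Spectralnorm wrapper
    return conv

conv = wrap_norm(nn.Conv2D(3, 16, kernel_size=3, padding=1), 'Weight')
layers = [conv, nn.ReLU()]                     # activation appended when use_activ is set
block = nn.Sequential(*layers)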
@@ -29,7 +29,7 @@ DeOldify_weight_url = 'https://paddlegan.bj.bcebos.com/applications/DeOldify_sta
 def frames_to_video_ffmpeg(framepath, videopath, r):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     cmd = ffmpeg + [
         ' -r ', r, ' -f ', ' image2 ', ' -i ', framepath, ' -vcodec ',
         ' libx264 ', ' -pix_fmt ', ' yuv420p ', ' -crf ', ' 16 ', videopath

@@ -139,7 +139,7 @@ class DeOldifyPredictor():
 def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     vid_name = vid_path.split('/')[-1].split('.')[0]
     out_full_path = os.path.join(outpath, 'frames_input')

@@ -169,7 +169,7 @@ def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
 if __name__ == '__main__':
-    paddle.enable_imperative()
+    paddle.disable_static()
     args = parser.parse_args()
     predictor = DeOldifyPredictor(args.input,
...
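The entry point now calls paddle.disable_static() instead of the older paddle.enable_imperative(); both switch Paddle into imperative (dygraph) execution, but disable_static() is the name kept by the Paddle 2.x API, and it has to run before the predictor builds its layers. A minimal sketch of what the mode switch does, independent of DeOldify itself:

import paddle

paddle.disable_static()              # leave static-graph mode; ops now execute eagerly
print(paddle.in_dynamic_mode())      # True: layers built after this point live in dygraph mode

x = paddle.to_tensor([1.0, 2.0, 3.0])
print((x * 2).numpy())               # no Program/Executor needed, results are available immediately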
@@ -2,19 +2,20 @@ import cv2
 import numpy as np

 def read_img(path, size=None, is_gt=False):
     """read image by cv2
     return: Numpy float32, HWC, BGR, [0,1]"""
-    # print('debug:', path)
     img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
     img = img.astype(np.float32) / 255.
     if img.ndim == 2:
         img = np.expand_dims(img, axis=2)
     if img.shape[2] > 3:
         img = img[:, :, :3]
     return img

 def get_test_neighbor_frames(crt_i, N, max_n, padding='new_info'):
     """Generate an index list for reading N frames from a sequence of images

@@ -62,7 +63,7 @@ def get_test_neighbor_frames(crt_i, N, max_n, padding='new_info'):
         else:
             add_idx = i
         return_l.append(add_idx)
     # name_b = '{:08d}'.format(crt_i)
     return return_l

@@ -70,7 +71,6 @@ class EDVRDataset:
     def __init__(self, frame_paths):
         self.frames = frame_paths

     def __getitem__(self, index):
         indexs = get_test_neighbor_frames(index, 5, len(self.frames))
         frame_list = []

@@ -79,7 +79,6 @@ class EDVRDataset:
             frame_list.append(img)
         img_LQs = np.stack(frame_list, axis=0)
-        print('img:', img_LQs.shape)
         # BGR to RGB, HWC to CHW, numpy to tensor
         img_LQs = img_LQs[:, :, :, [2, 1, 0]]
         img_LQs = np.transpose(img_LQs, (0, 3, 1, 2)).astype('float32')

@@ -87,4 +86,4 @@ class EDVRDataset:
         return img_LQs, self.frames[index]

     def __len__(self):
-        return len(self.frames)
\ No newline at end of file
+        return len(self.frames)
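With the shape print gone, EDVRDataset.__getitem__ still does the same preprocessing: stack the neighbouring frames, reorder channels from cv2's BGR to RGB, and move from HWC to the CHW layout the network expects. A small numpy sketch of that conversion on dummy data (the 64x64 size is a placeholder):

import numpy as np

# five neighbouring frames as read_img would return them: HWC, BGR, float32 in [0, 1]
frame_list = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(5)]

img_LQs = np.stack(frame_list, axis=0)                           # (N, H, W, C)
img_LQs = img_LQs[:, :, :, [2, 1, 0]]                            # BGR -> RGB by reindexing the channel axis
img_LQs = np.transpose(img_LQs, (0, 3, 1, 2)).astype('float32')  # HWC -> CHW: (N, C, H, W)
print(img_LQs.shape)                                             # (5, 3, 64, 64)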
@@ -27,6 +27,7 @@ import numpy as np
 import paddle.fluid as fluid
 import cv2
+from tqdm import tqdm
 from data import EDVRDataset
 from paddle.utils.download import get_path_from_url

@@ -52,7 +53,6 @@ def parse_args():
 def get_img(pred):
-    print('pred shape', pred.shape)
     pred = pred.squeeze()
     pred = np.clip(pred, a_min=0., a_max=1.0)
     pred = pred * 255

@@ -72,7 +72,7 @@ def save_img(img, framename):
 def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     vid_name = vid_path.split('/')[-1].split('.')[0]
     out_full_path = os.path.join(outpath, 'frames_input')

@@ -102,7 +102,7 @@ def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
 def frames_to_video_ffmpeg(framepath, videopath, r):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     cmd = ffmpeg + [
         ' -r ', r, ' -f ', ' image2 ', ' -i ', framepath, ' -vcodec ',
         ' libx264 ', ' -pix_fmt ', ' yuv420p ', ' -crf ', ' 16 ', videopath

@@ -164,7 +164,7 @@ class EDVRPredictor:
         periods = []
         cur_time = time.time()
-        for infer_iter, data in enumerate(dataset):
+        for infer_iter, data in enumerate(tqdm(dataset)):
             data_feed_in = [data[0]]
             infer_outs = self.exe.run(

@@ -185,7 +185,7 @@ class EDVRPredictor:
             period = cur_time - prev_time
             periods.append(period)
-            print('Processed {} samples'.format(infer_iter + 1))
+            # print('Processed {} samples'.format(infer_iter + 1))
         frame_pattern_combined = os.path.join(pred_frame_path, '%08d.png')
         vid_out_path = os.path.join(self.output,
                                     '{}_edvr_out.mp4'.format(base_name))
...
@@ -28,7 +28,7 @@ RealSR_weight_url = 'https://paddlegan.bj.bcebos.com/applications/DF2K_JPEG.pdpa
 def frames_to_video_ffmpeg(framepath, videopath, r):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     cmd = ffmpeg + [
         ' -r ', r, ' -f ', ' image2 ', ' -i ', framepath, ' -vcodec ',
         ' libx264 ', ' -pix_fmt ', ' yuv420p ', ' -crf ', ' 16 ', videopath

@@ -110,7 +110,7 @@ class RealSRPredictor():
 def dump_frames_ffmpeg(vid_path, outpath, r=None, ss=None, t=None):
-    ffmpeg = ['ffmpeg ', ' -loglevel ', ' error ']
+    ffmpeg = ['ffmpeg ', ' -y -loglevel ', ' error ']
     vid_name = vid_path.split('/')[-1].split('.')[0]
     out_full_path = os.path.join(outpath, 'frames_input')
...