Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
linkedin_61247605
DeepMosaics
提交
2bbda351
DeepMosaics
项目概览
linkedin_61247605
/
DeepMosaics
与 Fork 源项目一致
Fork自
Hypo / DeepMosaics
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
DeepMosaics
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
2bbda351
编写于
4月 19, 2021
作者:
H
hypox64
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
BVDNet SpectralNorm
上级
8f4e9158
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
78 additions
and
17 deletions
+78
-17
.gitignore
.gitignore
+1
-0
models/BVDNet.py
models/BVDNet.py
+36
-16
models/model_util.py
models/model_util.py
+41
-1
未找到文件。
.gitignore
浏览文件 @
2bbda351
...
...
@@ -143,6 +143,7 @@ video_tmp/
result/
nohup.out
#./
/.vscode
/pix2pix
/pix2pixHD
/tmp
...
...
models/BVDNet.py
浏览文件 @
2bbda351
...
...
@@ -4,17 +4,38 @@ import torch.nn.functional as F
from
.pix2pixHD_model
import
*
from
.model_util
import
*
class
UpBlock
(
nn
.
Module
):
def
__init__
(
self
,
in_channel
,
out_channel
,
kernel_size
=
3
,
padding
=
1
):
super
().
__init__
()
self
.
convup
=
nn
.
Sequential
(
nn
.
Upsample
(
scale_factor
=
2
,
mode
=
'bilinear'
,
align_corners
=
False
),
nn
.
ReflectionPad2d
(
padding
),
# EqualConv2d(out_channel, out_channel, kernel_size, padding=padding),
SpectralNorm
(
nn
.
Conv2d
(
in_channel
,
out_channel
,
kernel_size
)),
nn
.
LeakyReLU
(
0.2
),
# Blur(out_channel),
)
def
forward
(
self
,
input
):
outup
=
self
.
convup
(
input
)
return
outup
class
Encoder2d
(
nn
.
Module
):
def
__init__
(
self
,
input_nc
,
ngf
=
64
,
n_downsampling
=
3
,
norm_layer
=
nn
.
BatchNorm2d
,
activation
=
nn
.
ReLU
(
True
)):
def
__init__
(
self
,
input_nc
,
ngf
=
64
,
n_downsampling
=
3
,
activation
=
nn
.
LeakyReLU
(
0.2
)):
super
(
Encoder2d
,
self
).
__init__
()
model
=
[
nn
.
ReflectionPad2d
(
3
),
nn
.
Conv2d
(
input_nc
,
ngf
,
kernel_size
=
7
,
padding
=
0
),
norm_layer
(
ngf
),
activation
]
model
=
[
nn
.
ReflectionPad2d
(
3
),
SpectralNorm
(
nn
.
Conv2d
(
input_nc
,
ngf
,
kernel_size
=
7
,
padding
=
0
)
),
activation
]
### downsample
for
i
in
range
(
n_downsampling
):
mult
=
2
**
i
model
+=
[
nn
.
ReflectionPad2d
(
1
),
nn
.
Conv2d
(
ngf
*
mult
,
ngf
*
mult
*
2
,
kernel_size
=
3
,
stride
=
2
,
padding
=
0
),
norm_layer
(
ngf
*
mult
*
2
),
activation
]
model
+=
[
nn
.
ReflectionPad2d
(
1
),
SpectralNorm
(
nn
.
Conv2d
(
ngf
*
mult
,
ngf
*
mult
*
2
,
kernel_size
=
3
,
stride
=
2
,
padding
=
0
)),
activation
]
self
.
model
=
nn
.
Sequential
(
*
model
)
...
...
@@ -22,15 +43,15 @@ class Encoder2d(nn.Module):
return
self
.
model
(
input
)
class
Encoder3d
(
nn
.
Module
):
def
__init__
(
self
,
input_nc
,
ngf
=
64
,
n_downsampling
=
3
,
norm_layer
=
nn
.
BatchNorm3d
,
activation
=
nn
.
ReLU
(
True
)):
def
__init__
(
self
,
input_nc
,
ngf
=
64
,
n_downsampling
=
3
,
activation
=
nn
.
LeakyReLU
(
0.2
)):
super
(
Encoder3d
,
self
).
__init__
()
model
=
[
nn
.
Conv3d
(
input_nc
,
ngf
,
kernel_size
=
3
,
padding
=
1
),
norm_layer
(
ngf
),
activation
]
model
=
[
SpectralNorm
(
nn
.
Conv3d
(
input_nc
,
ngf
,
kernel_size
=
3
,
padding
=
1
)
),
activation
]
### downsample
for
i
in
range
(
n_downsampling
):
mult
=
2
**
i
model
+=
[
nn
.
Conv3d
(
ngf
*
mult
,
ngf
*
mult
*
2
,
kernel_size
=
3
,
stride
=
2
,
padding
=
1
),
norm_layer
(
ngf
*
mult
*
2
),
activation
]
model
+=
[
SpectralNorm
(
nn
.
Conv3d
(
ngf
*
mult
,
ngf
*
mult
*
2
,
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
),
activation
]
self
.
model
=
nn
.
Sequential
(
*
model
)
...
...
@@ -38,32 +59,31 @@ class Encoder3d(nn.Module):
return
self
.
model
(
input
)
class
BVDNet
(
nn
.
Module
):
def
__init__
(
self
,
N
,
n_downsampling
=
3
,
n_blocks
=
1
,
input_nc
=
3
,
output_nc
=
3
,
norm
=
'batch'
,
activation
=
nn
.
LeakyReLU
(
0.2
)):
def
__init__
(
self
,
N
,
n_downsampling
=
3
,
n_blocks
=
1
,
input_nc
=
3
,
output_nc
=
3
,
activation
=
nn
.
LeakyReLU
(
0.2
)):
super
(
BVDNet
,
self
).
__init__
()
ngf
=
64
padding_type
=
'reflect'
norm_layer
=
get_norm_layer
(
norm
,
'2d'
)
norm_layer_3d
=
get_norm_layer
(
norm
,
'3d'
)
self
.
N
=
N
# encoder
self
.
encoder3d
=
Encoder3d
(
input_nc
,
64
,
n_downsampling
,
norm_layer_3d
,
activation
)
self
.
encoder2d
=
Encoder2d
(
input_nc
,
64
,
n_downsampling
,
norm_layer
,
activation
)
self
.
encoder3d
=
Encoder3d
(
input_nc
,
64
,
n_downsampling
,
activation
)
self
.
encoder2d
=
Encoder2d
(
input_nc
,
64
,
n_downsampling
,
activation
)
### resnet blocks
self
.
blocks
=
[]
mult
=
2
**
n_downsampling
for
i
in
range
(
n_blocks
):
self
.
blocks
+=
[
ResnetBlock
(
ngf
*
mult
,
padding_type
=
padding_type
,
activation
=
nn
.
ReLU
(
True
),
norm_layer
=
norm_layer
)]
self
.
blocks
+=
[
ResnetBlock
SpectralNorm
(
ngf
*
mult
,
padding_type
=
padding_type
,
activation
=
activation
)]
self
.
blocks
=
nn
.
Sequential
(
*
self
.
blocks
)
### decoder
self
.
decoder
=
[]
for
i
in
range
(
n_downsampling
):
mult
=
2
**
(
n_downsampling
-
i
)
self
.
decoder
+=
[
nn
.
ConvTranspose2d
(
ngf
*
mult
,
int
(
ngf
*
mult
/
2
),
kernel_size
=
3
,
stride
=
2
,
padding
=
1
,
output_padding
=
1
),
norm_layer
(
int
(
ngf
*
mult
/
2
)),
activation
]
self
.
decoder
+=
[
UpBlock
(
ngf
*
mult
,
int
(
ngf
*
mult
/
2
))]
# self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
# norm_layer(int(ngf * mult / 2)), activation]
# self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'),
# nn.ReflectionPad2d(1),
# nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0),
...
...
models/model_util.py
浏览文件 @
2bbda351
import
torch
import
torch.nn
as
nn
from
torch.nn
import
init
import
torch.nn.utils.spectral_norm
as
SpectralNorm
import
functools
...
...
@@ -51,4 +52,43 @@ def init_weights(net, init_type='normal', gain=0.02):
init
.
constant_
(
m
.
bias
.
data
,
0.0
)
print
(
'initialize network with %s'
%
init_type
)
net
.
apply
(
init_func
)
\ No newline at end of file
net
.
apply
(
init_func
)
class
ResnetBlockSpectralNorm
(
nn
.
Module
):
def
__init__
(
self
,
dim
,
padding_type
,
activation
=
nn
.
LeakyReLU
(
0.2
),
use_dropout
=
False
):
super
(
ResnetBlockSpectralNorm
,
self
).
__init__
()
self
.
conv_block
=
self
.
build_conv_block
(
dim
,
padding_type
,
activation
,
use_dropout
)
def
build_conv_block
(
self
,
dim
,
padding_type
,
activation
,
use_dropout
):
conv_block
=
[]
p
=
0
if
padding_type
==
'reflect'
:
conv_block
+=
[
nn
.
ReflectionPad2d
(
1
)]
elif
padding_type
==
'replicate'
:
conv_block
+=
[
nn
.
ReplicationPad2d
(
1
)]
elif
padding_type
==
'zero'
:
p
=
1
else
:
raise
NotImplementedError
(
'padding [%s] is not implemented'
%
padding_type
)
conv_block
+=
[
SpectralNorm
(
nn
.
Conv2d
(
dim
,
dim
,
kernel_size
=
3
,
padding
=
p
)),
activation
]
if
use_dropout
:
conv_block
+=
[
nn
.
Dropout
(
0.5
)]
p
=
0
if
padding_type
==
'reflect'
:
conv_block
+=
[
nn
.
ReflectionPad2d
(
1
)]
elif
padding_type
==
'replicate'
:
conv_block
+=
[
nn
.
ReplicationPad2d
(
1
)]
elif
padding_type
==
'zero'
:
p
=
1
else
:
raise
NotImplementedError
(
'padding [%s] is not implemented'
%
padding_type
)
conv_block
+=
[
SpectralNorm
(
nn
.
Conv2d
(
dim
,
dim
,
kernel_size
=
3
,
padding
=
p
))]
return
nn
.
Sequential
(
*
conv_block
)
def
forward
(
self
,
x
):
out
=
x
+
self
.
conv_block
(
x
)
return
out
\ No newline at end of file
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录