Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Overbill1683
Stable Diffusion Webui
提交
f126986b
S
Stable Diffusion Webui
项目概览
Overbill1683
/
Stable Diffusion Webui
10 个月 前同步成功
通知
1748
Star
81
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
分析
仓库
DevOps
项目成员
Pages
S
Stable Diffusion Webui
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Pages
分析
分析
仓库分析
DevOps
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
提交
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
f126986b
编写于
11月 01, 2022
作者:
A
AUTOMATIC1111
提交者:
GitHub
11月 01, 2022
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #4098 from jn-jairo/load-model
Unload sd_model before loading the other to solve the issue #3449
上级
08744040
af758e97
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
34 additions
and
10 deletions
+34
-10
modules/lowvram.py
modules/lowvram.py
+13
-8
modules/processing.py
modules/processing.py
+3
-0
modules/sd_hijack.py
modules/sd_hijack.py
+4
-0
modules/sd_models.py
modules/sd_models.py
+13
-1
webui.py
webui.py
+1
-1
未找到文件。
modules/lowvram.py
浏览文件 @
f126986b
...
...
@@ -38,13 +38,18 @@ def setup_for_low_vram(sd_model, use_medvram):
# see below for register_forward_pre_hook;
# first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
# useless here, and we just replace those methods
def first_stage_model_encode_wrap(self, encoder, x):
    """Move *self* (the first-stage model) onto the GPU, then delegate to the wrapped encode callable."""
    # Hook helper: make sure the module lives on the GPU before encoding.
    send_me_to_gpu(self, None)
    encoded = encoder(x)
    return encoded
def first_stage_model_decode_wrap(self, decoder, z):
    """Move *self* (the first-stage model) onto the GPU, then delegate to the wrapped decode callable."""
    # Hook helper: make sure the module lives on the GPU before decoding.
    send_me_to_gpu(self, None)
    decoded = decoder(z)
    return decoded
# Cache the first-stage model and its original bound encode/decode methods,
# so the closures below can still reach the originals after the attributes
# on the model are replaced with the wrappers.
first_stage_model = sd_model.first_stage_model
first_stage_model_encode = first_stage_model.encode
first_stage_model_decode = first_stage_model.decode

def first_stage_model_encode_wrap(x):
    # Ensure the cached module is on the GPU before running the original encode.
    send_me_to_gpu(first_stage_model, None)
    return first_stage_model_encode(x)

def first_stage_model_decode_wrap(z):
    # Ensure the cached module is on the GPU before running the original decode.
    send_me_to_gpu(first_stage_model, None)
    return first_stage_model_decode(z)
# remove three big modules, cond, first_stage, and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU.
...
...
@@ -56,8 +61,8 @@ def setup_for_low_vram(sd_model, use_medvram):
# register hooks for those the first two models
sd_model
.
cond_stage_model
.
transformer
.
register_forward_pre_hook
(
send_me_to_gpu
)
sd_model
.
first_stage_model
.
register_forward_pre_hook
(
send_me_to_gpu
)
sd_model
.
first_stage_model
.
encode
=
lambda
x
,
en
=
sd_model
.
first_stage_model
.
encode
:
first_stage_model_encode_wrap
(
sd_model
.
first_stage_model
,
en
,
x
)
sd_model
.
first_stage_model
.
decode
=
lambda
z
,
de
=
sd_model
.
first_stage_model
.
decode
:
first_stage_model_decode_wrap
(
sd_model
.
first_stage_model
,
de
,
z
)
sd_model
.
first_stage_model
.
encode
=
first_stage_model_encode_wrap
sd_model
.
first_stage_model
.
decode
=
first_stage_model_decode_wrap
parents
[
sd_model
.
cond_stage_model
.
transformer
]
=
sd_model
.
cond_stage_model
if
use_medvram
:
...
...
modules/processing.py
浏览文件 @
f126986b
...
...
@@ -597,6 +597,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if
p
.
scripts
is
not
None
:
p
.
scripts
.
postprocess
(
p
,
res
)
p
.
sd_model
=
None
p
.
sampler
=
None
return
res
...
...
modules/sd_hijack.py
浏览文件 @
f126986b
...
...
@@ -94,6 +94,10 @@ class StableDiffusionModelHijack:
if
type
(
model_embeddings
.
token_embedding
)
==
EmbeddingsWithFixes
:
model_embeddings
.
token_embedding
=
model_embeddings
.
token_embedding
.
wrapped
self
.
layers
=
None
self
.
circular_enabled
=
False
self
.
clip
=
None
def
apply_circular
(
self
,
enable
):
if
self
.
circular_enabled
==
enable
:
return
...
...
modules/sd_models.py
浏览文件 @
f126986b
import
collections
import
os.path
import
sys
import
gc
from
collections
import
namedtuple
import
torch
import
re
...
...
@@ -220,6 +221,12 @@ def load_model(checkpoint_info=None):
if
checkpoint_info
.
config
!=
shared
.
cmd_opts
.
config
:
print
(
f
"Loading config from:
{
checkpoint_info
.
config
}
"
)
if
shared
.
sd_model
:
sd_hijack
.
model_hijack
.
undo_hijack
(
shared
.
sd_model
)
shared
.
sd_model
=
None
gc
.
collect
()
devices
.
torch_gc
()
sd_config
=
OmegaConf
.
load
(
checkpoint_info
.
config
)
if
should_hijack_inpainting
(
checkpoint_info
):
...
...
@@ -233,6 +240,7 @@ def load_model(checkpoint_info=None):
checkpoint_info
=
checkpoint_info
.
_replace
(
config
=
checkpoint_info
.
config
.
replace
(
".yaml"
,
"-inpainting.yaml"
))
do_inpainting_hijack
()
sd_model
=
instantiate_from_config
(
sd_config
.
model
)
load_model_weights
(
sd_model
,
checkpoint_info
)
...
...
@@ -252,14 +260,18 @@ def load_model(checkpoint_info=None):
return
sd_model
def
reload_model_weights
(
sd_model
,
info
=
None
):
def
reload_model_weights
(
sd_model
=
None
,
info
=
None
):
from
modules
import
lowvram
,
devices
,
sd_hijack
checkpoint_info
=
info
or
select_checkpoint
()
if
not
sd_model
:
sd_model
=
shared
.
sd_model
if
sd_model
.
sd_model_checkpoint
==
checkpoint_info
.
filename
:
return
if
sd_model
.
sd_checkpoint_info
.
config
!=
checkpoint_info
.
config
or
should_hijack_inpainting
(
checkpoint_info
)
!=
should_hijack_inpainting
(
sd_model
.
sd_checkpoint_info
):
del
sd_model
checkpoints_loaded
.
clear
()
load_model
(
checkpoint_info
)
return
shared
.
sd_model
...
...
webui.py
浏览文件 @
f126986b
...
...
@@ -78,7 +78,7 @@ def initialize():
modules
.
scripts
.
load_scripts
()
modules
.
sd_models
.
load_model
()
shared
.
opts
.
onchange
(
"sd_model_checkpoint"
,
wrap_queued_call
(
lambda
:
modules
.
sd_models
.
reload_model_weights
(
shared
.
sd_model
)))
shared
.
opts
.
onchange
(
"sd_model_checkpoint"
,
wrap_queued_call
(
lambda
:
modules
.
sd_models
.
reload_model_weights
()))
shared
.
opts
.
onchange
(
"sd_hypernetwork"
,
wrap_queued_call
(
lambda
:
modules
.
hypernetworks
.
hypernetwork
.
load_hypernetwork
(
shared
.
opts
.
sd_hypernetwork
)))
shared
.
opts
.
onchange
(
"sd_hypernetwork_strength"
,
modules
.
hypernetworks
.
hypernetwork
.
apply_strength
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录