Unverified commit a336c7fe, authored by AUTOMATIC1111, committed by GitHub

Merge pull request #9017 from camenduru/dev

convert to python v3.9
@@ -2,6 +2,7 @@ import glob
 import os
 import re
 import torch
+from typing import Union

 from modules import shared, devices, sd_models, errors

@@ -235,7 +236,7 @@ def lora_calc_updown(lora, module, target):
     return updown


-def lora_apply_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.MultiheadAttention):
+def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
     """
     Applies the currently selected set of Loras to the weights of torch layer self.
     If weights already have this particular set of loras applied, does nothing.
@@ -295,7 +296,7 @@ def lora_apply_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.Multih
     setattr(self, "lora_current_names", wanted_names)


-def lora_reset_cached_weight(self: torch.nn.Conv2d | torch.nn.Linear):
+def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
     setattr(self, "lora_current_names", ())
     setattr(self, "lora_weights_backup", None)
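For context on why the conversion is needed: the `X | Y` union syntax for annotations (PEP 604) is only available on Python 3.10+, and because function annotations are evaluated at definition time, importing a module that uses it under Python 3.9 raises a TypeError. A minimal sketch of the difference, using an illustrative function name that is not part of the patch:

from typing import Union

import torch

# Python 3.10+ only: on Python 3.9, evaluating the annotation below at
# definition time fails with "TypeError: unsupported operand type(s) for |".
# def apply_weights(self: torch.nn.Conv2d | torch.nn.Linear): ...

# Python 3.9-compatible equivalent, the pattern this patch applies:
def apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear]) -> None:
    ...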