# network_oft.py
import torch
import network
from lyco_helpers import factorization
from einops import rearrange


class ModuleTypeOFT(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if "oft_blocks" in weights.w or "oft_diag" in weights.w:
            return NetworkModuleOFT(net, weights)

        return None

# Supports both kohya-ss' implementation of COFT  https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
# and KohakuBlueleaf's implementation of OFT/COFT https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py
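#
# In brief: both formats store per-block matrices that become a block-diagonal
# orthogonal transform R, and the merged weight is R applied blockwise to the
# original weight (see calc_updown below). COFT additionally clamps how far
# the transform may move from the identity.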
class NetworkModuleOFT(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.lin_module = None
        self.org_module: list[torch.nn.Module] = [self.sd_module]

        self.scale = 1.0

        # kohya-ss
        if "oft_blocks" in weights.w.keys():
            self.is_kohya = True
            self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size)
            self.alpha = weights.w["alpha"] # used to derive the COFT constraint below
            self.dim = self.oft_blocks.shape[0] # lora dim
        # LyCORIS
        elif "oft_diag" in weights.w.keys():
            self.is_kohya = False
            self.oft_blocks = weights.w["oft_diag"]
            # self.alpha is unused
            self.dim = self.oft_blocks.shape[1] # oft_blocks is (num_blocks, block_size, block_size)

        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
        is_conv = type(self.sd_module) in [torch.nn.Conv2d]
        is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported

        if is_linear:
            self.out_dim = self.sd_module.out_features
        elif is_conv:
            self.out_dim = self.sd_module.out_channels
        elif is_other_linear:
            self.out_dim = self.sd_module.embed_dim

        if self.is_kohya:
            self.constraint = self.alpha * self.out_dim
            self.num_blocks = self.dim
            self.block_size = self.out_dim // self.dim
        else:
            self.constraint = None
            self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
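        # In both layouts the blocks tile the output dimension:
        # num_blocks * block_size == self.out_dim (exact for LyCORIS'
        # factorization; for kohya-ss this assumes out_dim is divisible by dim)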

    def calc_updown(self, orig_weight):
        oft_blocks = self.oft_blocks.to(orig_weight.device)
        eye = torch.eye(self.block_size, device=oft_blocks.device) # same device as the blocks moved above

        if self.is_kohya:
            block_Q = oft_blocks - oft_blocks.transpose(1, 2) # make each block skew-symmetric
            norm_Q = torch.norm(block_Q.flatten())
            new_norm_Q = torch.clamp(norm_Q, max=self.constraint) # COFT: cap the deviation from identity
            block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
            oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse()) # Cayley transform: skew-symmetric -> orthogonal
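            # The Cayley transform above turns a skew-symmetric Q into an
            # orthogonal matrix. A minimal standalone sketch to verify this
            # (illustrative only, not part of this module):
            #   Q = torch.randn(4, 4); Q = Q - Q.T           # skew-symmetric
            #   I = torch.eye(4)
            #   R = (I + Q) @ torch.linalg.inv(I - Q)        # orthogonal
            #   assert torch.allclose(R @ R.T, I, atol=1e-5)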

        R = oft_blocks.to(orig_weight.device)

        # This errors out for MultiheadAttention; it might need to be handled upstream
        merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
        merged_weight = torch.einsum(
            'k n m, k n ... -> k m ...',
            R,
            merged_weight
        )
        merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
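        # The three steps above apply R blockwise: split the out_dim rows into
        # num_blocks groups of block_size, rotate each group by its orthogonal
        # block (the einsum contracts over R's n index, i.e. each block acts
        # as its transpose), then flatten the groups back into rows.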

        updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) # delta that turns the original weight into the rotated one
        output_shape = orig_weight.shape
        return self.finalize_updown(updown, orig_weight, output_shape)