import csv
import datetime
import glob
import html
import os
import sys
import traceback
import inspect

import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared, sd_samplers, hashes
from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_

from collections import defaultdict, deque
from statistics import stdev, mean


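# Every optimizer class exposed by torch.optim, keyed by class name (the base Optimizer class is excluded).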
optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}

class HypernetworkModule(torch.nn.Module):
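    """One half of a hypernetwork: a small residual MLP applied to a cross-attention context tensor."""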
    multiplier = 1.0
    activation_dict = {
        "linear": torch.nn.Identity,
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,
        "tanh": torch.nn.Tanh,
        "sigmoid": torch.nn.Sigmoid,
    }
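    # Also register every activation class defined in torch.nn.modules.activation, keyed by lowercase class name.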
    activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})

    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
                 add_layer_norm=False, activate_output=False, dropout_structure=None):
        super().__init__()

        assert layer_structure is not None, "layer_structure must not be None"
        assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
        assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"

        linears = []
        for i in range(len(layer_structure) - 1):

            # Add a fully-connected layer
            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))

            # Add an activation function after each layer except the last (unless activate_output is set)
            if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
                pass
            elif activation_func in self.activation_dict:
                linears.append(self.activation_dict[activation_func]())
            else:
                raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')

            # Add layer normalization
            if add_layer_norm:
                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))

            # Dropout has already been parsed into dropout_structure and is applied here.
            # Since dropout only ever follows a layer, the structure starts and ends with 0.
            if dropout_structure is not None and dropout_structure[i+1] > 0:
                assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or a float between 0 and 1!"
                linears.append(torch.nn.Dropout(p=dropout_structure[i+1]))
            # Example: [1, 2, 1] gets no dropout when last_layer_dropout is False; [1, 2, 2, 1] maps to
            # [0, 0.3, 0, 0] when it is False and [0, 0.3, 0.3, 0] when it is True.

        self.linear = torch.nn.Sequential(*linears)

        if state_dict is not None:
            self.fix_old_state_dict(state_dict)
            self.load_state_dict(state_dict)
        else:
            for layer in self.linear:
                if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                    w, b = layer.weight.data, layer.bias.data
                    if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
                        normal_(w, mean=0.0, std=0.01)
                        normal_(b, mean=0.0, std=0)
                    elif weight_init == 'XavierUniform':
                        xavier_uniform_(w)
                        zeros_(b)
                    elif weight_init == 'XavierNormal':
                        xavier_normal_(w)
                        zeros_(b)
                    elif weight_init == 'KaimingUniform':
                        kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
                        zeros_(b)
                    elif weight_init == 'KaimingNormal':
                        kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
                        zeros_(b)
                    else:
                        raise KeyError(f"Key {weight_init} is not defined as initialization!")
        self.to(devices.device)

    def fix_old_state_dict(self, state_dict):
        changes = {
            'linear1.bias': 'linear.0.bias',
            'linear1.weight': 'linear.0.weight',
            'linear2.bias': 'linear.1.bias',
            'linear2.weight': 'linear.1.weight',
        }

        for fr, to in changes.items():
            x = state_dict.get(fr, None)
            if x is None:
                continue

            del state_dict[fr]
            state_dict[to] = x

    def forward(self, x):
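        # The strength multiplier only applies at inference; during training the residual is used at full strength.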
        return x + self.linear(x) * (HypernetworkModule.multiplier if not self.training else 1)

    def trainables(self):
        layer_structure = []
        for layer in self.linear:
            if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                layer_structure += [layer.weight, layer.bias]
        return layer_structure


def apply_strength(value=None):
    HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength

# layer_structure: sequence whose length determines the dropout structure; use_dropout: master switch;
# last_layer_dropout: whether dropout is also applied before the last layer (kept for compatibility).
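# Example: parse_dropout_structure([1, 2, 2, 1], True, False) returns [0, 0.3, 0, 0].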
def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout):
    if layer_structure is None:
        layer_structure = [1, 2, 1]
    if not use_dropout:
        return [0] * len(layer_structure)
    dropout_values = [0]
    dropout_values.extend([0.3] * (len(layer_structure) - 3))
    if last_layer_dropout:
        dropout_values.append(0.3)
    else:
        dropout_values.append(0)
    dropout_values.append(0)
    return dropout_values


class Hypernetwork:
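    """Holds a (key, value) pair of HypernetworkModules for each enabled attention size, plus training metadata."""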
    filename = None
    name = None

    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
        self.filename = None
        self.name = name
        self.layers = {}
        self.step = 0
        self.sd_checkpoint = None
        self.sd_checkpoint_name = None
        self.layer_structure = layer_structure
        self.activation_func = activation_func
        self.weight_init = weight_init
        self.add_layer_norm = add_layer_norm
        self.use_dropout = use_dropout
        self.activate_output = activate_output
        self.last_layer_dropout = kwargs.get('last_layer_dropout', True)
        self.dropout_structure = kwargs.get('dropout_structure', None)
        if self.dropout_structure is None:
            self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
        self.optimizer_name = None
        self.optimizer_state_dict = None
        self.optional_info = None

        for size in enable_sizes or []:
            self.layers[size] = (
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
                                   self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
                                   self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
            )
        self.eval()

    def weights(self):
        res = []
        for k, layers in self.layers.items():
            for layer in layers:
                res += layer.parameters()
        return res

    def train(self, mode=True):
        for k, layers in self.layers.items():
            for layer in layers:
                layer.train(mode=mode)
                for param in layer.parameters():
                    param.requires_grad = mode

    def eval(self):
        for k, layers in self.layers.items():
            for layer in layers:
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def save(self, filename):
        state_dict = {}
        optimizer_saved_dict = {}

        for k, v in self.layers.items():
            state_dict[k] = (v[0].state_dict(), v[1].state_dict())

        state_dict['step'] = self.step
        state_dict['name'] = self.name
        state_dict['layer_structure'] = self.layer_structure
        state_dict['activation_func'] = self.activation_func
        state_dict['is_layer_norm'] = self.add_layer_norm
        state_dict['weight_initialization'] = self.weight_init
        state_dict['sd_checkpoint'] = self.sd_checkpoint
        state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
        state_dict['activate_output'] = self.activate_output
        state_dict['use_dropout'] = self.use_dropout
        state_dict['dropout_structure'] = self.dropout_structure
        state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout
        state_dict['optional_info'] = self.optional_info if self.optional_info else None

        if self.optimizer_name is not None:
            optimizer_saved_dict['optimizer_name'] = self.optimizer_name

        torch.save(state_dict, filename)
        if shared.opts.save_optimizer_state and self.optimizer_state_dict:
            optimizer_saved_dict['hash'] = self.shorthash()
            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
            torch.save(optimizer_saved_dict, filename + '.optim')

    def load(self, filename):
        self.filename = filename
        if self.name is None:
            self.name = os.path.splitext(os.path.basename(filename))[0]

        state_dict = torch.load(filename, map_location='cpu')

        self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
        self.optional_info = state_dict.get('optional_info', None)
        self.activation_func = state_dict.get('activation_func', None)
        self.weight_init = state_dict.get('weight_initialization', 'Normal')
        self.add_layer_norm = state_dict.get('is_layer_norm', False)
        self.dropout_structure = state_dict.get('dropout_structure', None)
        self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False)
        self.activate_output = state_dict.get('activate_output', True)
        self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
        # The dropout structure must have the same length as the layer structure; every value must be in [0, 1), and the last value must be 0.
        if self.dropout_structure is None:
            self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)

        if shared.opts.print_hypernet_extra:
            if self.optional_info is not None:
                print(f"  INFO:\n {self.optional_info}\n")

            print(f"  Layer structure: {self.layer_structure}")
            print(f"  Activation function: {self.activation_func}")
            print(f"  Weight initialization: {self.weight_init}")
            print(f"  Layer norm: {self.add_layer_norm}")
            print(f"  Dropout usage: {self.use_dropout}" )
            print(f"  Activate last layer: {self.activate_output}")
            print(f"  Dropout structure: {self.dropout_structure}")

        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}

        if self.shorthash() == optimizer_saved_dict.get('hash', None):
            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
        else:
            self.optimizer_state_dict = None
        if self.optimizer_state_dict:
            self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
            print("Loaded existing optimizer from checkpoint")
            print(f"Optimizer name is {self.optimizer_name}")
        else:
            self.optimizer_name = "AdamW"
            print("No saved optimizer exists in checkpoint")

        for size, sd in state_dict.items():
            if type(size) == int:
                self.layers[size] = (
                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
                                       self.add_layer_norm, self.activate_output, self.dropout_structure),
                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
                                       self.add_layer_norm, self.activate_output, self.dropout_structure),
                )

        self.name = state_dict.get('name', self.name)
        self.step = state_dict.get('step', 0)
        self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
        self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
        self.eval()

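    # First 10 hex digits of the file's sha256; also used to match saved optimizer state to this hypernetwork.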
    def shorthash(self):
        sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')

        return sha256[0:10]


def list_hypernetworks(path):
    res = {}
    for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
        name = os.path.splitext(os.path.basename(filename))[0]
        # Prevent a hypothetical "None.pt" from being listed.
        if name != "None":
            res[name] = filename
    return res


def load_hypernetwork(filename):
    path = shared.hypernetworks.get(filename, None)
    # Prevent any file named "None.pt" from being loaded.
    if path is not None and filename != "None":
        print(f"Loading hypernetwork {filename}")
        try:
            shared.loaded_hypernetwork = Hypernetwork()
            shared.loaded_hypernetwork.load(path)

        except Exception:
            print(f"Error loading hypernetwork {path}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
    else:
        if shared.loaded_hypernetwork is not None:
            print("Unloading hypernetwork")

        shared.loaded_hypernetwork = None


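# Return the shortest hypernetwork name containing the search string, or None if nothing matches.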
def find_closest_hypernetwork_name(search: str):
    if not search:
        return None
    search = search.lower()
    applicable = [name for name in shared.hypernetworks if search in name.lower()]
    if not applicable:
        return None
    applicable = sorted(applicable, key=lambda name: len(name))
    return applicable[0]


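# Look up the module pair trained for this context's embedding width and apply it to get separate key/value contexts.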
def apply_hypernetwork(hypernetwork, context, layer=None):
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)

    if hypernetwork_layers is None:
        return context, context

    if layer is not None:
        layer.hyper_k = hypernetwork_layers[0]
        layer.hyper_v = hypernetwork_layers[1]

    context_k = hypernetwork_layers[0](context)
    context_v = hypernetwork_layers[1](context)
    return context_k, context_v


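# Replacement forward for ldm's CrossAttention that routes the context through the loaded hypernetwork.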
def attention_CrossAttention_forward(self, x, context=None, mask=None):
    h = self.heads

    q = self.to_q(x)
    context = default(context, x)

    context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
    k = self.to_k(context_k)
    v = self.to_v(context_v)

    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    attn = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', attn, v)
    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
    return self.to_out(out)


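# Pad conditionings to a common token count by repeating each one's last vector, then stack them into a batch.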
def stack_conds(conds):
    if len(conds) == 1:
        return torch.stack(conds)

    # same as in reconstruct_multicond_batch
    token_count = max([x.shape[0] for x in conds])
    for i in range(len(conds)):
        if conds[i].shape[0] != token_count:
            last_vector = conds[i][-1:]
            last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
            conds[i] = torch.vstack([conds[i], last_vector_repeated])

    return torch.stack(conds)


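# Format mean ± standard error of the loss, for the full history and for the 32 most recent values.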
def statistics(data):
    if len(data) < 2:
        std = 0
    else:
        std = stdev(data)
    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
    recent_data = data[-32:]
    if len(recent_data) < 2:
        std = 0
    else:
        std = stdev(recent_data)
    recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
    return total_information, recent_information


def report_statistics(loss_info: dict):
    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
    for key in keys:
        try:
            print("Loss statistics for file " + key)
            info, recent = statistics(list(loss_info[key]))
            print(info)
            print(recent)
        except Exception as e:
            print(e)


def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
    # Remove illegal characters from name.
    name = "".join( x for x in name if (x.isalnum() or x in "._- "))
    assert name, "Name cannot be empty!"

    fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
    if not overwrite_old:
        assert not os.path.exists(fn), f"file {fn} already exists"

    if type(layer_structure) == str:
        layer_structure = [float(x.strip()) for x in layer_structure.split(",")]

    if use_dropout and dropout_structure and type(dropout_structure) == str:
        dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")]
    else:
        dropout_structure = [0] * len(layer_structure)

    hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(
        name=name,
        enable_sizes=[int(x) for x in enable_sizes],
        layer_structure=layer_structure,
        activation_func=activation_func,
        weight_init=weight_init,
        add_layer_norm=add_layer_norm,
        use_dropout=use_dropout,
        dropout_structure=dropout_structure
    )
    hypernet.save(fn)

    shared.reload_hypernetworks()


def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
    from modules import images

    save_hypernetwork_every = save_hypernetwork_every or 0
    create_image_every = create_image_every or 0
    template_file = textual_inversion.textual_inversion_templates.get(template_filename, None)
    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
    template_file = template_file.path

    path = shared.hypernetworks.get(hypernetwork_name, None)
    shared.loaded_hypernetwork = Hypernetwork()
    shared.loaded_hypernetwork.load(path)

    shared.state.job = "train-hypernetwork"
    shared.state.textinfo = "Initializing hypernetwork training..."
    shared.state.job_count = steps

    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')

    log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
    unload = shared.opts.unload_models_when_training

    if save_hypernetwork_every > 0:
        hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
        os.makedirs(hypernetwork_dir, exist_ok=True)
    else:
        hypernetwork_dir = None

    if create_image_every > 0:
        images_dir = os.path.join(log_directory, "images")
        os.makedirs(images_dir, exist_ok=True)
    else:
        images_dir = None

    hypernetwork = shared.loaded_hypernetwork
    checkpoint = sd_models.select_checkpoint()

    initial_step = hypernetwork.step or 0
    if initial_step >= steps:
        shared.state.textinfo = "Model has already been trained beyond specified max steps"
        return hypernetwork, filename

    scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
    
    clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
    if clip_grad:
        clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)

    if shared.opts.training_enable_tensorboard:
        tensorboard_writer = textual_inversion.tensorboard_setup(log_directory)

    # dataset loading may take a while, so input validations and early returns should be done before this
    shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."

    pin_memory = shared.opts.pin_memory

    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)

    if shared.opts.save_training_settings_to_txt:
        saved_params = dict(
            model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
            **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
        )
        logging.save_settings_to_file(log_directory, {**saved_params, **locals()})

    latent_sampling_method = ds.latent_sampling_method

    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)

    old_parallel_processing_allowed = shared.parallel_processing_allowed

    if unload:
        shared.parallel_processing_allowed = False
        shared.sd_model.cond_stage_model.to(devices.cpu)
        shared.sd_model.first_stage_model.to(devices.cpu)

    weights = hypernetwork.weights()
    hypernetwork.train()

    # Use the optimizer type stored with the saved hypernetwork; this could also be exposed as a UI option.
    if hypernetwork.optimizer_name in optimizer_dict:
        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
        optimizer_name = hypernetwork.optimizer_name
    else:
        print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
        optimizer_name = 'AdamW'

    if hypernetwork.optimizer_state_dict:  # This check must change if the optimizer type can differ from the saved one.
        try:
            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
        except RuntimeError as e:
            print("Cannot resume from saved optimizer!")
            print(e)

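    # Mixed-precision training: scale the loss, accumulate gradients over gradient_step batches, then step the optimizer.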
    scaler = torch.cuda.amp.GradScaler()
    
    batch_size = ds.batch_size
    gradient_step = ds.gradient_step
    # one optimizer step processes batch_size * gradient_step images
    steps_per_epoch = len(ds) // batch_size // gradient_step
    max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
    loss_step = 0
    _loss_step = 0 #internal
    # size = len(ds.indexes)
    # loss_dict = defaultdict(lambda : deque(maxlen = 1024))
    # losses = torch.zeros((size,))
    # previous_mean_losses = [0]
    # previous_mean_loss = 0
    # print("Mean loss of {} elements".format(size))

    steps_without_grad = 0

    last_saved_file = "<none>"
    last_saved_image = "<none>"
    forced_filename = "<none>"

    pbar = tqdm.tqdm(total=steps - initial_step)
    try:
        for i in range((steps-initial_step) * gradient_step):
            if scheduler.finished:
                break
            if shared.state.interrupted:
                break
            for j, batch in enumerate(dl):
                # works as a drop_last=True for gradient accumulation
                if j == max_steps_per_epoch:
                    break
                scheduler.apply(optimizer, hypernetwork.step)
                if scheduler.finished:
                    break
                if shared.state.interrupted:
                    break

                if clip_grad:
                    clip_grad_sched.step(hypernetwork.step)
                
                with devices.autocast():
                    x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
                    if tag_drop_out != 0 or shuffle_tags:
                        shared.sd_model.cond_stage_model.to(devices.device)
                        c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
                        shared.sd_model.cond_stage_model.to(devices.cpu)
                    else:
                        c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
                    loss = shared.sd_model(x, c)[0] / gradient_step
                    del x
                    del c

                    _loss_step += loss.item()
                scaler.scale(loss).backward()
                
                # keep accumulating gradients until we have processed gradient_step batches
                if (j + 1) % gradient_step != 0:
                    continue

                if clip_grad:
                    clip_grad(weights, clip_grad_sched.learn_rate)
                
                scaler.step(optimizer)
                scaler.update()
                hypernetwork.step += 1
                pbar.update()
                optimizer.zero_grad(set_to_none=True)
                loss_step = _loss_step
                _loss_step = 0

                steps_done = hypernetwork.step + 1
                
                epoch_num = hypernetwork.step // steps_per_epoch
                epoch_step = hypernetwork.step % steps_per_epoch

                description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}] loss: {loss_step:.7f}"
                pbar.set_description(description)
                shared.state.textinfo = description
                if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
                    # Before saving, change name to match current checkpoint.
                    hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
                    last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
                    hypernetwork.optimizer_name = optimizer_name
                    if shared.opts.save_optimizer_state:
                        hypernetwork.optimizer_state_dict = optimizer.state_dict()
                    save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
                    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.

                if shared.opts.training_enable_tensorboard:
                    epoch_num = hypernetwork.step // len(ds)
                    epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1

                    textual_inversion.tensorboard_add(tensorboard_writer, loss=loss_step, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)

                textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
                    "loss": f"{loss_step:.7f}",
                    "learn_rate": scheduler.learn_rate
                })

                if images_dir is not None and steps_done % create_image_every == 0:
                    forced_filename = f'{hypernetwork_name}-{steps_done}'
                    last_saved_image = os.path.join(images_dir, forced_filename)
                    hypernetwork.eval()
                    rng_state = torch.get_rng_state()
                    cuda_rng_state = None
                    if torch.cuda.is_available():
                        cuda_rng_state = torch.cuda.get_rng_state_all()
                    shared.sd_model.cond_stage_model.to(devices.device)
                    shared.sd_model.first_stage_model.to(devices.device)

                    p = processing.StableDiffusionProcessingTxt2Img(
                        sd_model=shared.sd_model,
                        do_not_save_grid=True,
                        do_not_save_samples=True,
                    )

                    if preview_from_txt2img:
                        p.prompt = preview_prompt
                        p.negative_prompt = preview_negative_prompt
                        p.steps = preview_steps
                        p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
                        p.cfg_scale = preview_cfg_scale
                        p.seed = preview_seed
                        p.width = preview_width
                        p.height = preview_height
                    else:
                        p.prompt = batch.cond_text[0]
                        p.steps = 20
                        p.width = training_width
                        p.height = training_height

                    preview_text = p.prompt

                    processed = processing.process_images(p)
                    image = processed.images[0] if len(processed.images) > 0 else None
                    
                    if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
                        textual_inversion.tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, hypernetwork.step)

                    if unload:
                        shared.sd_model.cond_stage_model.to(devices.cpu)
                        shared.sd_model.first_stage_model.to(devices.cpu)
                    torch.set_rng_state(rng_state)
                    if torch.cuda.is_available():
                        torch.cuda.set_rng_state_all(cuda_rng_state)
                    hypernetwork.train()
                    if image is not None:
                        shared.state.current_image = image
                        last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
                        last_saved_image += f", prompt: {preview_text}"

                shared.state.job_no = hypernetwork.step

                shared.state.textinfo = f"""
<p>
Loss: {loss_step:.7f}<br/>
Step: {steps_done}<br/>
Last prompt: {html.escape(batch.cond_text[0])}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
    except Exception:
        print(traceback.format_exc(), file=sys.stderr)
    finally:
        pbar.leave = False
        pbar.close()
        hypernetwork.eval()
        #report_statistics(loss_dict)

    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
    hypernetwork.optimizer_name = optimizer_name
    if shared.opts.save_optimizer_state:
        hypernetwork.optimizer_state_dict = optimizer.state_dict()
    save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)

    del optimizer
    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
    shared.sd_model.cond_stage_model.to(devices.device)
    shared.sd_model.first_stage_model.to(devices.device)
    shared.parallel_processing_allowed = old_parallel_processing_allowed

    return hypernetwork, filename

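# Save with updated checkpoint metadata, restoring the previous metadata if saving fails.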
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
    old_hypernetwork_name = hypernetwork.name
    old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
    old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
    try:
        hypernetwork.sd_checkpoint = checkpoint.shorthash
        hypernetwork.sd_checkpoint_name = checkpoint.model_name
        hypernetwork.name = hypernetwork_name
        hypernetwork.save(filename)
    except:
        hypernetwork.sd_checkpoint = old_sd_checkpoint
        hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
        hypernetwork.name = old_hypernetwork_name
        raise