import csv
import datetime
import glob
import html
import os
import sys
import traceback
from collections import defaultdict, deque
from statistics import stdev, mean

import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from torch import einsum

import modules.textual_inversion.dataset
from modules import devices, processing, sd_models, shared
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler


class HypernetworkModule(torch.nn.Module):
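    """A small residual MLP applied to attention context vectors.

    Layer widths are `dim` scaled by the entries of `layer_structure`, with an
    optional activation function, layer normalization and dropout between layers.
    """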
    multiplier = 1.0
    activation_dict = {
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,
    }

    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
        super().__init__()

        assert layer_structure is not None, "layer_structure must not be None"
        assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
        assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"

        linears = []
        for i in range(len(layer_structure) - 1):

            # Add a fully-connected layer
            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))

            # Add an activation function, except after the final layer (unless activate_output is set)
            if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
                pass
            elif activation_func in self.activation_dict:
                linears.append(self.activation_dict[activation_func]())
            else:
                raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')

            # Add layer normalization
            if add_layer_norm:
                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))

            # Add dropout, except after the final layer
            if use_dropout and i < len(layer_structure) - 2:
                linears.append(torch.nn.Dropout(p=0.3))

        self.linear = torch.nn.Sequential(*linears)

        if state_dict is not None:
            self.fix_old_state_dict(state_dict)
            self.load_state_dict(state_dict)
        else:
            for layer in self.linear:
                if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                    layer.weight.data.normal_(mean=0.0, std=0.01)
                    layer.bias.data.zero_()

        self.to(devices.device)

    def fix_old_state_dict(self, state_dict):
        changes = {
            'linear1.bias': 'linear.0.bias',
            'linear1.weight': 'linear.0.weight',
            'linear2.bias': 'linear.1.bias',
            'linear2.weight': 'linear.1.weight',
        }

        for fr, to in changes.items():
            x = state_dict.get(fr, None)
            if x is None:
                continue

            del state_dict[fr]
            state_dict[to] = x

    def forward(self, x):
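        # residual formulation: the module adds a learned offset to its input, scaled by the class-level multiplier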
        return x + self.linear(x) * self.multiplier

    def trainables(self):
        params = []
        for layer in self.linear:
            if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                params += [layer.weight, layer.bias]
        return params


def apply_strength(value=None):
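    # propagate the UI strength setting to every module; it is used as the scale factor in HypernetworkModule.forward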
    HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength


class Hypernetwork:
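    """A collection of HypernetworkModule pairs, one (key, value) pair per supported context dimension."""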
    filename = None
    name = None

    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
        self.filename = None
        self.name = name
        self.layers = {}
        self.step = 0
        self.sd_checkpoint = None
        self.sd_checkpoint_name = None
        self.layer_structure = layer_structure
        self.activation_func = activation_func
        self.add_layer_norm = add_layer_norm
        self.use_dropout = use_dropout
        self.activate_output = activate_output

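        # each enabled size gets a pair of modules: the first transforms the context
        # fed to the key projection, the second the one fed to the value projection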
        for size in enable_sizes or []:
            self.layers[size] = (
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
            )

    def weights(self):
        res = []

        for k, layers in self.layers.items():
            for layer in layers:
                layer.train()
                res += layer.trainables()

        return res

    def save(self, filename):
        state_dict = {}

        for k, v in self.layers.items():
            state_dict[k] = (v[0].state_dict(), v[1].state_dict())

        state_dict['step'] = self.step
        state_dict['name'] = self.name
        state_dict['layer_structure'] = self.layer_structure
        state_dict['activation_func'] = self.activation_func
        state_dict['is_layer_norm'] = self.add_layer_norm
        state_dict['use_dropout'] = self.use_dropout
        state_dict['sd_checkpoint'] = self.sd_checkpoint
        state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
        state_dict['activate_output'] = self.activate_output

        torch.save(state_dict, filename)

    def load(self, filename):
        self.filename = filename
        if self.name is None:
            self.name = os.path.splitext(os.path.basename(filename))[0]

        state_dict = torch.load(filename, map_location='cpu')

        self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
        self.activation_func = state_dict.get('activation_func', None)
        self.add_layer_norm = state_dict.get('is_layer_norm', False)
        self.use_dropout = state_dict.get('use_dropout', False)
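        # a missing key defaults to True, presumably because checkpoints saved before
        # this flag existed were produced by code that always activated the output layer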
        self.activate_output = state_dict.get('activate_output', True)

        for size, sd in state_dict.items():
            if type(size) == int:
                self.layers[size] = (
                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
                )

        self.name = state_dict.get('name', self.name)
        self.step = state_dict.get('step', 0)
        self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
        self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)


def list_hypernetworks(path):
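    """Returns a dict mapping hypernetwork names to .pt file paths found under `path`, searched recursively."""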
    res = {}
    for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
        name = os.path.splitext(os.path.basename(filename))[0]
        res[name] = filename
    return res


def load_hypernetwork(filename):
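    """Loads the named hypernetwork into shared.loaded_hypernetwork; unknown names unload the current one."""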
    path = shared.hypernetworks.get(filename, None)
    if path is not None:
        print(f"Loading hypernetwork {filename}")
        try:
            shared.loaded_hypernetwork = Hypernetwork()
            shared.loaded_hypernetwork.load(path)

        except Exception:
            print(f"Error loading hypernetwork {path}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
    else:
        if shared.loaded_hypernetwork is not None:
            print("Unloading hypernetwork")

        shared.loaded_hypernetwork = None


def find_closest_hypernetwork_name(search: str):
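    """Returns the shortest hypernetwork name that contains `search` (case-insensitive), or None if nothing matches."""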
    if not search:
        return None
    search = search.lower()
    applicable = [name for name in shared.hypernetworks if search in name.lower()]
    if not applicable:
        return None
    applicable = sorted(applicable, key=lambda name: len(name))
    return applicable[0]


def apply_hypernetwork(hypernetwork, context, layer=None):
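    """Transforms `context` into the contexts used for the key and value projections;
    returns it unchanged (twice) when the hypernetwork has no modules for this size."""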
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)

    if hypernetwork_layers is None:
        return context, context

    if layer is not None:
        layer.hyper_k = hypernetwork_layers[0]
        layer.hyper_v = hypernetwork_layers[1]

    context_k = hypernetwork_layers[0](context)
    context_v = hypernetwork_layers[1](context)
    return context_k, context_v


def attention_CrossAttention_forward(self, x, context=None, mask=None):
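    # this mirrors CrossAttention.forward from ldm.modules.attention, with the loaded
    # hypernetwork applied to the context before the key/value projections; presumably
    # it is patched over the original implementation elsewhere in the codebase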
    h = self.heads

    q = self.to_q(x)
    context = default(context, x)

    context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
    k = self.to_k(context_k)
    v = self.to_v(context_v)

    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    attn = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', attn, v)
    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
    return self.to_out(out)


def stack_conds(conds):
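    """Stacks conditioning tensors into one batch, padding shorter ones by repeating their last vector."""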
    if len(conds) == 1:
        return torch.stack(conds)

    # same as in reconstruct_multicond_batch
    token_count = max([x.shape[0] for x in conds])
    for i in range(len(conds)):
        if conds[i].shape[0] != token_count:
            last_vector = conds[i][-1:]
            last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
            conds[i] = torch.vstack([conds[i], last_vector_repeated])

    return torch.stack(conds)


def statistics(data):
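    """Returns "mean±(standard error)" summaries of the full loss history and of the last 32 values."""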
    if len(data) < 2:
        std = 0
    else:
        std = stdev(data)
    total_information = f"loss:{mean(data):.3f}\u00B1({std / (len(data) ** 0.5):.3f})"
    recent_data = data[-32:]
    if len(recent_data) < 2:
        std = 0
    else:
        std = stdev(recent_data)
    recent_information = f"recent 32 loss:{mean(recent_data):.3f}\u00B1({std / (len(recent_data) ** 0.5):.3f})"
    return total_information, recent_information


def report_statistics(loss_info: dict):
    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
    for key in keys:
        try:
            print("Loss statistics for file " + key)
            info, recent = statistics(list(loss_info[key]))
            print(info)
            print(recent)
        except Exception as e:
            print(e)


def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
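    """Trains the selected hypernetwork on the images in data_root, periodically
    saving checkpoints and preview images under log_directory."""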
    # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
    from modules import images

    assert hypernetwork_name, 'hypernetwork not selected'

    path = shared.hypernetworks.get(hypernetwork_name, None)
    shared.loaded_hypernetwork = Hypernetwork()
    shared.loaded_hypernetwork.load(path)

    shared.state.textinfo = "Initializing hypernetwork training..."
    shared.state.job_count = steps

    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')

    log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
    unload = shared.opts.unload_models_when_training

    if save_hypernetwork_every > 0:
        hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
        os.makedirs(hypernetwork_dir, exist_ok=True)
    else:
        hypernetwork_dir = None

    if create_image_every > 0:
        images_dir = os.path.join(log_directory, "images")
        os.makedirs(images_dir, exist_ok=True)
    else:
        images_dir = None

    shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
    with torch.autocast("cuda"):
        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
    if unload:
        shared.sd_model.cond_stage_model.to(devices.cpu)
        shared.sd_model.first_stage_model.to(devices.cpu)

    hypernetwork = shared.loaded_hypernetwork
    weights = hypernetwork.weights()
    for weight in weights:
        weight.requires_grad = True

    size = len(ds.indexes)
    loss_dict = defaultdict(lambda: deque(maxlen=1024))
    losses = torch.zeros((size,))
    previous_mean_losses = [0]
    previous_mean_loss = 0
    print("Mean loss of {} elements".format(size))

    last_saved_file = "<none>"
    last_saved_image = "<none>"
    forced_filename = "<none>"

    initial_step = hypernetwork.step or 0
    if initial_step > steps:
        return hypernetwork, filename

    scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
    # the optimizer is hardcoded to AdamW for now; other choices (Adam, SGD, ...) could be wired in here
    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)

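    # counts consecutive steps in which the trained weights receive no gradient; the assert in the loop below aborts training after 10 such steps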
    steps_without_grad = 0

    pbar = tqdm.tqdm(enumerate(ds), total=steps - initial_step)
    for i, entries in pbar:
        hypernetwork.step = i + initial_step
        if len(loss_dict) > 0:
            previous_mean_losses = [i[-1] for i in loss_dict.values()]
            previous_mean_loss = mean(previous_mean_losses)
        scheduler.apply(optimizer, hypernetwork.step)
        if scheduler.finished:
            break

        if shared.state.interrupted:
            break

        with torch.autocast("cuda"):
            c = stack_conds([entry.cond for entry in entries]).to(devices.device)
            # c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
            x = torch.stack([entry.latent for entry in entries]).to(devices.device)
            loss = shared.sd_model(x, c)[0]
            del x
            del c

            losses[hypernetwork.step % losses.shape[0]] = loss.item()
            for entry in entries:
                loss_dict[entry.filename].append(loss.item())
            optimizer.zero_grad()
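            # the first weight's grad is cleared explicitly so the check after backward() can tell whether a fresh gradient actually arrived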
            weights[0].grad = None
            loss.backward()

            if weights[0].grad is None:
                steps_without_grad += 1
            else:
                steps_without_grad = 0
            assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'

            optimizer.step()

        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
            raise RuntimeError("Loss diverged.")
        if len(previous_mean_losses) > 1:
            std = stdev(previous_mean_losses)
        else:
            std = 0
        dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}\u00B1({std / (len(previous_mean_losses) ** 0.5):.3f})"
        pbar.set_description(dataset_loss_info)

        if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
            # Before saving, change name to match current checkpoint.
            hypernetwork.name = f'{hypernetwork_name}-{hypernetwork.step}'
            last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
            hypernetwork.save(last_saved_file)

        textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
            "loss": f"{previous_mean_loss:.7f}",
            "learn_rate": scheduler.learn_rate
        })

        if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
            forced_filename = f'{hypernetwork_name}-{hypernetwork.step}'
            last_saved_image = os.path.join(images_dir, forced_filename)

            optimizer.zero_grad()
            shared.sd_model.cond_stage_model.to(devices.device)
            shared.sd_model.first_stage_model.to(devices.device)

            p = processing.StableDiffusionProcessingTxt2Img(
                sd_model=shared.sd_model,
                do_not_save_grid=True,
                do_not_save_samples=True,
            )

            if preview_from_txt2img:
                p.prompt = preview_prompt
                p.negative_prompt = preview_negative_prompt
                p.steps = preview_steps
                p.sampler_index = preview_sampler_index
                p.cfg_scale = preview_cfg_scale
                p.seed = preview_seed
                p.width = preview_width
                p.height = preview_height
            else:
                p.prompt = entries[0].cond_text
                p.steps = 20

            preview_text = p.prompt

            processed = processing.process_images(p)
            image = processed.images[0] if len(processed.images) > 0 else None

            if unload:
                shared.sd_model.cond_stage_model.to(devices.cpu)
                shared.sd_model.first_stage_model.to(devices.cpu)

            if image is not None:
                shared.state.current_image = image
                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename)
                last_saved_image += f", prompt: {preview_text}"

        shared.state.job_no = hypernetwork.step

        shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
    report_statistics(loss_dict)
    checkpoint = sd_models.select_checkpoint()

    hypernetwork.sd_checkpoint = checkpoint.hash
    hypernetwork.sd_checkpoint_name = checkpoint.model_name
    # Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
    hypernetwork.name = hypernetwork_name
    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
    hypernetwork.save(filename)

    return hypernetwork, filename