hypernetwork.py

import csv
import datetime
import glob
import html
import os
import sys
import traceback

import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum

from collections import defaultdict, deque
from statistics import stdev, mean

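# Each HypernetworkModule is a small residual MLP applied to the cross-attention context:
# forward() returns x + linear(x) * multiplier, so a freshly initialized module (std=0.01 weights)
# starts out close to an identity mapping.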
class HypernetworkModule(torch.nn.Module):
    multiplier = 1.0
    activation_dict = {
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,
    }

    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False):
        super().__init__()

        assert layer_structure is not None, "layer_structure must not be None"
        assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
        assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"

        linears = []
        for i in range(len(layer_structure) - 1):

            # Add a fully-connected layer
            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))

            # Add an activation func
            if activation_func == "linear" or activation_func is None:
                pass
            elif activation_func in self.activation_dict:
                linears.append(self.activation_dict[activation_func]())
            else:
                raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')

            # Add layer normalization
            if add_layer_norm:
                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))

            # Add dropout except last layer
            if use_dropout and i < len(layer_structure) - 3:
                linears.append(torch.nn.Dropout(p=0.3))

        self.linear = torch.nn.Sequential(*linears)

        if state_dict is not None:
            self.fix_old_state_dict(state_dict)
            self.load_state_dict(state_dict)
        else:
            for layer in self.linear:
                if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                    layer.weight.data.normal_(mean=0.0, std=0.01)
                    layer.bias.data.zero_()

        self.to(devices.device)

    def fix_old_state_dict(self, state_dict):
        changes = {
            'linear1.bias': 'linear.0.bias',
            'linear1.weight': 'linear.0.weight',
            'linear2.bias': 'linear.1.bias',
            'linear2.weight': 'linear.1.weight',
        }

        for fr, to in changes.items():
            x = state_dict.get(fr, None)
            if x is None:
                continue

            del state_dict[fr]
            state_dict[to] = x

    def forward(self, x):
        return x + self.linear(x) * self.multiplier

    def trainables(self):
        layer_structure = []
        for layer in self.linear:
            if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
                layer_structure += [layer.weight, layer.bias]
        return layer_structure


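# Sets the class-wide multiplier used by every HypernetworkModule; with no explicit value it
# falls back to the sd_hypernetwork_strength option from shared.opts.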
def apply_strength(value=None):
    HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength


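# A Hypernetwork keeps one (key, value) pair of HypernetworkModules per supported context width
# (enable_sizes); apply_hypernetwork() below picks the pair matching context.shape[2].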
class Hypernetwork:
    filename = None
    name = None

    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False):
        self.filename = None
        self.name = name
        self.layers = {}
        self.step = 0
        self.sd_checkpoint = None
        self.sd_checkpoint_name = None
        self.layer_structure = layer_structure
        self.activation_func = activation_func
        self.add_layer_norm = add_layer_norm
        self.use_dropout = use_dropout

        for size in enable_sizes or []:
            self.layers[size] = (
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
            )

    def weights(self):
        res = []

        for k, layers in self.layers.items():
            for layer in layers:
                layer.train()
                res += layer.trainables()

        return res

    def save(self, filename):
        state_dict = {}

        for k, v in self.layers.items():
            state_dict[k] = (v[0].state_dict(), v[1].state_dict())

        state_dict['step'] = self.step
        state_dict['name'] = self.name
        state_dict['layer_structure'] = self.layer_structure
        state_dict['activation_func'] = self.activation_func
        state_dict['is_layer_norm'] = self.add_layer_norm
        state_dict['use_dropout'] = self.use_dropout
        state_dict['sd_checkpoint'] = self.sd_checkpoint
        state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name

        torch.save(state_dict, filename)

    def load(self, filename):
        self.filename = filename
        if self.name is None:
            self.name = os.path.splitext(os.path.basename(filename))[0]

        state_dict = torch.load(filename, map_location='cpu')

        self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
        self.activation_func = state_dict.get('activation_func', None)
        self.add_layer_norm = state_dict.get('is_layer_norm', False)
        self.use_dropout = state_dict.get('use_dropout', False)

        for size, sd in state_dict.items():
            if type(size) == int:
                self.layers[size] = (
                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout),
                )

        self.name = state_dict.get('name', self.name)
        self.step = state_dict.get('step', 0)
        self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
        self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)


def list_hypernetworks(path):
    res = {}
    for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
        name = os.path.splitext(os.path.basename(filename))[0]
        res[name] = filename
    return res


def load_hypernetwork(filename):
    path = shared.hypernetworks.get(filename, None)
    if path is not None:
        print(f"Loading hypernetwork {filename}")
        try:
            shared.loaded_hypernetwork = Hypernetwork()
            shared.loaded_hypernetwork.load(path)

        except Exception:
            print(f"Error loading hypernetwork {path}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
    else:
        if shared.loaded_hypernetwork is not None:
            print("Unloading hypernetwork")

        shared.loaded_hypernetwork = None


def find_closest_hypernetwork_name(search: str):
    if not search:
        return None
    search = search.lower()
    applicable = [name for name in shared.hypernetworks if search in name.lower()]
    if not applicable:
        return None
    applicable = sorted(applicable, key=lambda name: len(name))
    return applicable[0]


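# Returns the (context_k, context_v) tensors to feed into cross-attention's to_k/to_v projections;
# if no modules exist for this context width, the context passes through unchanged.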
def apply_hypernetwork(hypernetwork, context, layer=None):
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)

    if hypernetwork_layers is None:
        return context, context

    if layer is not None:
        layer.hyper_k = hypernetwork_layers[0]
        layer.hyper_v = hypernetwork_layers[1]

    context_k = hypernetwork_layers[0](context)
    context_v = hypernetwork_layers[1](context)
    return context_k, context_v


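# Replacement forward() for ldm's CrossAttention; presumably patched in elsewhere (e.g. by the
# sd_hijack machinery) so the loaded hypernetwork can rewrite the key/value context before attention.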
def attention_CrossAttention_forward(self, x, context=None, mask=None):
    h = self.heads

    q = self.to_q(x)
    context = default(context, x)

    context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
    k = self.to_k(context_k)
    v = self.to_v(context_v)

    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    attn = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', attn, v)
    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
    return self.to_out(out)


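# Conds within a batch may have different token counts; shorter ones are padded by repeating
# their last vector so that everything can be stacked into one tensor.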
def stack_conds(conds):
    if len(conds) == 1:
        return torch.stack(conds)

    # same as in reconstruct_multicond_batch
    token_count = max([x.shape[0] for x in conds])
    for i in range(len(conds)):
        if conds[i].shape[0] != token_count:
            last_vector = conds[i][-1:]
            last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
            conds[i] = torch.vstack([conds[i], last_vector_repeated])

    return torch.stack(conds)


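# The value after the plus-minus sign is the standard error of the mean (stdev / sqrt(n)),
# reported over all recorded losses and over the 32 most recent ones.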
def statistics(data):
    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({stdev(data) / (len(data) ** 0.5):.3f})"
    recent_data = data[-32:]
    recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({stdev(recent_data) / (len(recent_data) ** 0.5):.3f})"
    return total_information, recent_information


def report_statistics(loss_info:dict):
    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
    for key in keys:
        try:
            print("Loss statistics for file " + key)
            info, recent = statistics(list(loss_info[key]))
            print(info)
            print(recent)
        except Exception as e:
            print(e)



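# Main training entry point: loads the selected hypernetwork, prepares the dataset, then runs an
# AdamW loop over the hypernetwork weights, periodically saving checkpoints and preview images.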
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
    from modules import images

    assert hypernetwork_name, 'hypernetwork not selected'

    path = shared.hypernetworks.get(hypernetwork_name, None)
    shared.loaded_hypernetwork = Hypernetwork()
    shared.loaded_hypernetwork.load(path)

    shared.state.textinfo = "Initializing hypernetwork training..."
    shared.state.job_count = steps

    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')

    log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
    unload = shared.opts.unload_models_when_training

    if save_hypernetwork_every > 0:
        hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
        os.makedirs(hypernetwork_dir, exist_ok=True)
    else:
        hypernetwork_dir = None

    if create_image_every > 0:
        images_dir = os.path.join(log_directory, "images")
        os.makedirs(images_dir, exist_ok=True)
    else:
        images_dir = None

    shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
    with torch.autocast("cuda"):
        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
    if unload:
        shared.sd_model.cond_stage_model.to(devices.cpu)
        shared.sd_model.first_stage_model.to(devices.cpu)

    hypernetwork = shared.loaded_hypernetwork
    weights = hypernetwork.weights()
    for weight in weights:
        weight.requires_grad = True

    size = len(ds.indexes)
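    # Per-image loss history (up to the 1024 most recent values per file), consumed by report_statistics() at the end.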
    loss_dict = defaultdict(lambda : deque(maxlen = 1024))
    losses = torch.zeros((size,))
    previous_mean_losses = [0]
    previous_mean_loss = 0
    print("Mean loss of {} elements".format(size))

    last_saved_file = "<none>"
    last_saved_image = "<none>"
    forced_filename = "<none>"

    initial_step = hypernetwork.step or 0
    if initial_step > steps:
        return hypernetwork, filename

    scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
    # optimizer is hard-coded to AdamW for now; other optimizers (Adam, SGD, ...) could be slotted in here
    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)

    steps_without_grad = 0

    pbar = tqdm.tqdm(enumerate(ds), total=steps - initial_step)
    for i, entries in pbar:
        hypernetwork.step = i + initial_step
        if len(loss_dict) > 0:
            previous_mean_losses = [i[-1] for i in loss_dict.values()]
            previous_mean_loss = mean(previous_mean_losses)
            
        scheduler.apply(optimizer, hypernetwork.step)
        if scheduler.finished:
            break

        if shared.state.interrupted:
            break

        with torch.autocast("cuda"):
            c = stack_conds([entry.cond for entry in entries]).to(devices.device)
            # c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
            x = torch.stack([entry.latent for entry in entries]).to(devices.device)
            loss = shared.sd_model(x, c)[0]
            del x
            del c

            losses[hypernetwork.step % losses.shape[0]] = loss.item()
            for entry in entries:
                loss_dict[entry.filename].append(loss.item())

            optimizer.zero_grad()
            # clearing weights[0].grad explicitly lets the check below detect whether backward() produced any gradient
            weights[0].grad = None
            loss.backward()

            if weights[0].grad is None:
                steps_without_grad += 1
            else:
                steps_without_grad = 0
            assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'

            optimizer.step()

        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
            raise RuntimeError("Loss diverged.")
        
        if len(previous_mean_losses) > 1:
            std = stdev(previous_mean_losses)
        else:
            std = 0
        dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
        pbar.set_description(dataset_loss_info)

        if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
            # Before saving, change name to match current checkpoint.
            hypernetwork.name = f'{hypernetwork_name}-{hypernetwork.step}'
            last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
            hypernetwork.save(last_saved_file)

        textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
            "loss": f"{previous_mean_loss:.7f}",
            "learn_rate": scheduler.learn_rate
        })

        if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
            forced_filename = f'{hypernetwork_name}-{hypernetwork.step}'
            last_saved_image = os.path.join(images_dir, forced_filename)

            optimizer.zero_grad()
            shared.sd_model.cond_stage_model.to(devices.device)
            shared.sd_model.first_stage_model.to(devices.device)

            p = processing.StableDiffusionProcessingTxt2Img(
                sd_model=shared.sd_model,
                do_not_save_grid=True,
                do_not_save_samples=True,
            )

            if preview_from_txt2img:
                p.prompt = preview_prompt
                p.negative_prompt = preview_negative_prompt
                p.steps = preview_steps
                p.sampler_index = preview_sampler_index
                p.cfg_scale = preview_cfg_scale
                p.seed = preview_seed
                p.width = preview_width
                p.height = preview_height
            else:
                p.prompt = entries[0].cond_text
                p.steps = 20

            preview_text = p.prompt

            processed = processing.process_images(p)
            image = processed.images[0] if len(processed.images)>0 else None

            if unload:
                shared.sd_model.cond_stage_model.to(devices.cpu)
                shared.sd_model.first_stage_model.to(devices.cpu)

            if image is not None:
                shared.state.current_image = image
                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename)
                last_saved_image += f", prompt: {preview_text}"

        shared.state.job_no = hypernetwork.step

        shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""

    report_statistics(loss_dict)
    checkpoint = sd_models.select_checkpoint()

    hypernetwork.sd_checkpoint = checkpoint.hash
    hypernetwork.sd_checkpoint_name = checkpoint.model_name
    # Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
    hypernetwork.name = hypernetwork_name
    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
    hypernetwork.save(filename)

    return hypernetwork, filename