# webui.py
import argparse, os, sys, glob
from collections import namedtuple

import torch
import torch.nn as nn
import numpy as np
import gradio as gr
from omegaconf import OmegaConf
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from itertools import islice
from einops import rearrange, repeat
from torch import autocast
import mimetypes
import random
import math
import html
import time
import json
import traceback

import k_diffusion.external
import k_diffusion.sampling
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
import ldm.modules.encoders.modules

try:
    # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.

    from transformers import logging
    logging.set_verbosity_error()
except:
    pass

# this is a fix for Windows users. Without it, JavaScript files will be served with a text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')

# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
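# opt_C is the latent channel count and opt_f the VAE downsampling factor:
# a WxH image corresponds to a 4-channel (H/8)x(W/8) latent.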

LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
invalid_filename_chars = '<>:"/\\|?*\n'
config_filename = "config.json"

parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="configs/stable-diffusion/v1-inference.yaml", help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default="models/ldm/stable-diffusion-v1/model.ckpt", help="path to checkpoint of model",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) # i disagree with where you're putting it but since all guidefags are doing it this way, there you go
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware accleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default='embeddings', help="embeddings dirtectory for textual inversion (default: embeddings)")

cmd_opts = parser.parse_args()

css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""

SamplerData = namedtuple('SamplerData', ['name', 'constructor'])
samplers = [
    # note: funcname is bound via a default argument so each lambda keeps its own sampler function
    *[SamplerData(x[0], lambda m, funcname=x[1]: KDiffusionSampler(m, funcname)) for x in [
        ('LMS', 'sample_lms'),
        ('Heun', 'sample_heun'),
        ('Euler', 'sample_euler'),
        ('Euler ancestral', 'sample_euler_ancestral'),
        ('DPM 2', 'sample_dpm_2'),
        ('DPM 2 Ancestral', 'sample_dpm_2_ancestral'),
    ] if hasattr(k_diffusion.sampling, x[1])],
    SamplerData('DDIM', lambda m: DDIMSampler(m)),
    SamplerData('PLMS', lambda m: PLMSSampler(m)),
]
samplers_for_img2img = [x for x in samplers if x.name != 'DDIM' and x.name != 'PLMS']

RealesrganModelInfo = namedtuple("RealesrganModelInfo", ["name", "location", "model", "netscale"])

try:
    from basicsr.archs.rrdbnet_arch import RRDBNet
    from realesrgan import RealESRGANer
    from realesrgan.archs.srvgg_arch import SRVGGNetCompact

    realesrgan_models = [
        RealesrganModelInfo(
            name="Real-ESRGAN 4x plus",
            location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
            netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        ),
        RealesrganModelInfo(
            name="Real-ESRGAN 4x plus anime 6B",
            location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
            netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        ),
        RealesrganModelInfo(
            name="Real-ESRGAN 2x plus",
            location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
            netscale=2, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        ),
    ]
    have_realesrgan = True
except:
    print("Error loading Real-ESRGAN:", file=sys.stderr)
    print(traceback.format_exc(), file=sys.stderr)

    realesrgan_models = [RealesrganModelInfo('None', '', None, 0)]  # placeholder; fields are (name, location, model, netscale)
    have_realesrgan = False


class Options:
    data = None
    data_labels = {
        "outdir": ("", "Output directory; if empty, defaults to 'outputs/*'"),
        "samples_save": (True, "Save individual samples"),
        "samples_format": ('png', 'File format for individual samples'),
        "grid_save": (True, "Save image grids"),
        "grid_format": ('png', 'File format for grids'),
        "grid_extended_filename": (False, "Add extended info (seed, prompt) to filename when saving grid"),
        "n_rows": (-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", -1, 16),
        "jpeg_quality": (80, "Quality for saved jpeg images", 1, 100),
        "verify_input": (True, "Check input, and produce warning if it's too long"),
        "enable_pnginfo": (True, "Save text information about generation parameters as chunks to png files"),
        "prompt_matrix_add_to_start": (True, "In prompt matrix, add the variable combination of text to the start of the prompt, rather than the end"),
        "sd_upscale_overlap": (64, "Overlap for tiles for SD upscale. The smaller it is, the less smooth the transition from one tile to another", 0, 256, 16),
    }

    def __init__(self):
        self.data = {k: v[0] for k, v in self.data_labels.items()}

    def __setattr__(self, key, value):
        if self.data is not None:
            if key in self.data:
                self.data[key] = value

        return super(Options, self).__setattr__(key, value)

    def __getattr__(self, item):
        if self.data is not None:
            if item in self.data:
                return self.data[item]

        if item in self.data_labels:
            return self.data_labels[item][0]

        return super(Options, self).__getattribute__(item)

    def save(self, filename):
        with open(filename, "w", encoding="utf8") as file:
            json.dump(self.data, file)

    def load(self, filename):
        with open(filename, "r", encoding="utf8") as file:
            self.data = json.load(file)


def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())


def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model


class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, x, sigma, uncond, cond, cond_scale):
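        # classifier-free guidance: compute the conditional and unconditional
        # predictions in one batched pass, then blend them below; cond_scale > 1
        # pushes the result toward the prompt-conditioned prediction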
        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigma] * 2)
        cond_in = torch.cat([uncond, cond])
        uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
        return uncond + (cond - uncond) * cond_scale


class KDiffusionSampler:
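    # adapts a k-diffusion sampling function (e.g. sample_lms) to the same
    # sample() interface that DDIMSampler and PLMSSampler expose, so all
    # samplers can be used interchangeably by process_images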
    def __init__(self, m, funcname):
        self.model = m
        self.model_wrap = k_diffusion.external.CompVisDenoiser(m)
        self.funcname = funcname
        self.func = getattr(k_diffusion.sampling, self.funcname)

    def sample(self, S, conditioning, batch_size, shape, verbose, unconditional_guidance_scale, unconditional_conditioning, eta, x_T):
        sigmas = self.model_wrap.get_sigmas(S)
        x = x_T * sigmas[0]
        model_wrap_cfg = CFGDenoiser(self.model_wrap)

        samples_ddim = self.func(model_wrap_cfg, x, sigmas, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}, disable=False)

        return samples_ddim, None


def create_random_tensors(shape, seeds):
    xs = []
    for seed in seeds:
        torch.manual_seed(seed)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this so i do not dare change it for now because
        # it will break everyone's seeds.
        xs.append(torch.randn(shape, device=device))
    x = torch.stack(xs)
    return x


def torch_gc():
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()


def save_image(image, path, basename, seed, prompt, extension, info=None, short_filename=False):
    prompt = sanitize_filename_part(prompt)

    if short_filename:
        filename = f"{basename}.{extension}"
    else:
        filename = f"{basename}-{seed}-{prompt[:128]}.{extension}"

    if extension == 'png' and opts.enable_pnginfo and info is not None:
        pnginfo = PngImagePlugin.PngInfo()
        pnginfo.add_text("parameters", info)
    else:
        pnginfo = None

    image.save(os.path.join(path, filename), quality=opts.jpeg_quality, pnginfo=pnginfo)


def sanitize_filename_part(text):
    return text.replace(' ', '_').translate({ord(x): '' for x in invalid_filename_chars})[:128]


def plaintext_to_html(text):
    text = "".join([f"<p>{html.escape(x)}</p>\n" for x in text.split('\n')])
    return text


def load_GFPGAN():
    model_name = 'GFPGANv1.3'
    model_path = os.path.join(cmd_opts.gfpgan_dir, 'experiments/pretrained_models', model_name + '.pth')
    if not os.path.isfile(model_path):
        raise Exception("GFPGAN model not found at path "+model_path)

    sys.path.append(os.path.abspath(cmd_opts.gfpgan_dir))
    from gfpgan import GFPGANer

    return GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)


def image_grid(imgs, batch_size, force_n_rows=None):
    if force_n_rows is not None:
        rows = force_n_rows
    elif opts.n_rows > 0:
        rows = opts.n_rows
    elif opts.n_rows == 0:
        rows = batch_size
    else:
        rows = math.sqrt(len(imgs))
        rows = round(rows)

    cols = math.ceil(len(imgs) / rows)

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h), color='black')

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))

    return grid


Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])


def split_grid(image, tile_w=512, tile_h=512, overlap=64):
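    # split the image into tile_w x tile_h tiles whose edges overlap by `overlap`
    # pixels; the last row/column is shifted back so no tile extends past the image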
    w = image.width
    h = image.height

    now = tile_w - overlap  # non-overlap width
    noh = tile_h - overlap

    cols = math.ceil((w - overlap) / now)
    rows = math.ceil((h - overlap) / noh)

    grid = Grid([], tile_w, tile_h, w, h, overlap)
    for row in range(rows):
        row_images = []

        y = row * noh

        if y + tile_h >= h:
            y = h - tile_h

        for col in range(cols):
            x = col * now

            if x+tile_w >= w:
                x = w - tile_w

            tile = image.crop((x, y, x + tile_w, y + tile_h))

            row_images.append([x, tile_w, tile])

        grid.tiles.append([y, tile_h, row_images])

    return grid


def combine_grid(grid):
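    # tiles are blended back together with linear alpha ramps over the overlap
    # region: mask_w fades horizontally between adjacent columns and mask_h fades
    # vertically between rows, hiding seams between independently processed tiles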
    def make_mask_image(r):
        r = r * 255 / grid.overlap
        r = r.astype(np.uint8)
        return Image.fromarray(r, 'L')

    mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
    mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))

    combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
    for y, h, row in grid.tiles:
        combined_row = Image.new("RGB", (grid.image_w, h))
        for x, w, tile in row:
            if x == 0:
                combined_row.paste(tile, (0, 0))
                continue

            combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
            combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))

        if y == 0:
            combined_image.paste(combined_row, (0, 0))
            continue

        combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
        combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))

    return combined_image


def draw_prompt_matrix(im, width, height, all_prompts):
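    # annotates a prompt-matrix grid: the bits of each row/column index select
    # which prompt parts are active in that cell; inactive parts are drawn
    # struck through in gray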
    def wrap(text, d, font, line_length):
        lines = ['']
        for word in text.split():
            line = f'{lines[-1]} {word}'.strip()
            if d.textlength(line, font=font) <= line_length:
                lines[-1] = line
            else:
                lines.append(word)
        return '\n'.join(lines)

    def draw_texts(pos, x, y, texts, sizes):
        for i, (text, size) in enumerate(zip(texts, sizes)):
            active = pos & (1 << i) != 0

            if not active:
                text = '\u0336'.join(text) + '\u0336'

            d.multiline_text((x, y + size[1] / 2), text, font=fnt, fill=color_active if active else color_inactive, anchor="mm", align="center")

            y += size[1] + line_spacing

    fontsize = (width + height) // 25
    line_spacing = fontsize // 2
    fnt = ImageFont.truetype("arial.ttf", fontsize)
    color_active = (0, 0, 0)
    color_inactive = (153, 153, 153)

    pad_top = height // 4
    pad_left = width * 3 // 4 if len(all_prompts) > 2 else 0

    cols = im.width // width
    rows = im.height // height

    prompts = all_prompts[1:]

    result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
    result.paste(im, (pad_left, pad_top))

    d = ImageDraw.Draw(result)

    boundary = math.ceil(len(prompts) / 2)
    prompts_horiz = [wrap(x, d, fnt, width) for x in prompts[:boundary]]
    prompts_vert = [wrap(x, d, fnt, pad_left) for x in prompts[boundary:]]

    sizes_hor = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_horiz]]
    sizes_ver = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_vert]]
    hor_text_height = sum([x[1] + line_spacing for x in sizes_hor]) - line_spacing
    ver_text_height = sum([x[1] + line_spacing for x in sizes_ver]) - line_spacing

    for col in range(cols):
        x = pad_left + width * col + width / 2
        y = pad_top / 2 - hor_text_height / 2

        draw_texts(col, x, y, prompts_horiz, sizes_hor)

    for row in range(rows):
        x = pad_left / 2
        y = pad_top + height * row + height / 2 - ver_text_height / 2

        draw_texts(row, x, y, prompts_vert, sizes_ver)

    return result


def resize_image(resize_mode, im, width, height):
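    # resize_mode 0: stretch to the target size; 1: scale to cover the target and
    # center-crop; 2: scale to fit inside the target and fill the leftover bands
    # by stretching the image's outermost rows/columns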
    if resize_mode == 0:
        res = im.resize((width, height), resample=LANCZOS)
    elif resize_mode == 1:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio > src_ratio else im.width * height // im.height
        src_h = height if ratio <= src_ratio else im.height * width // im.width

        resized = im.resize((src_w, src_h), resample=LANCZOS)
        res = Image.new("RGB", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
    else:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio < src_ratio else im.width * height // im.height
        src_h = height if ratio >= src_ratio else im.height * width // im.width

        resized = im.resize((src_w, src_h), resample=LANCZOS)
        res = Image.new("RGB", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

        if ratio < src_ratio:
            fill_height = height // 2 - src_h // 2
            res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
            res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
        elif ratio > src_ratio:
            fill_width = width // 2 - src_w // 2
            res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
            res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))

    return res


def check_prompt_length(prompt, comments):
    """this function tests if prompt is too long, and if so, adds a message to comments"""

    tokenizer = model.cond_stage_model.tokenizer
    max_length = model.cond_stage_model.max_length

    info = tokenizer([prompt], truncation=True, max_length=max_length, return_overflowing_tokens=True, padding="max_length", return_tensors="pt")
    ovf = info['overflowing_tokens'][0]
    overflowing_count = ovf.shape[0]
    if overflowing_count == 0:
        return

    vocab = {v: k for k, v in tokenizer.get_vocab().items()}
    overflowing_words = [vocab.get(int(x), "") for x in ovf]
    overflowing_text = tokenizer.convert_tokens_to_string(''.join(overflowing_words))

    comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")


def wrap_gradio_call(func):
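    # times the wrapped gradio handler and appends the elapsed time to its last
    # output, which by convention is an HTML string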
    def f(*p1, **p2):
        t = time.perf_counter()
        res = list(func(*p1, **p2))
        elapsed = time.perf_counter() - t

        # last item is always HTML
        res[-1] = res[-1] + f"<p class='performance'>Time taken: {elapsed:.2f}s</p>"

        return tuple(res)

    return f


GFPGAN = None
if os.path.exists(cmd_opts.gfpgan_dir):
    try:
        GFPGAN = load_GFPGAN()
        print("Loaded GFPGAN")
    except Exception:
        print("Error loading GFPGAN:", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)


class StableDiffusionModelHijack:
    ids_lookup = {}
    word_embeddings = {}
    word_embeddings_checksums = {}
    fixes = None
    used_custom_terms = []
    dir_mtime = None

    def load_textual_inversion_embeddings(self, dir, model):
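        # scan `dir` for textual inversion embedding files, reloading only when the
        # directory's modification time changes, and index each learned embedding
        # by the first token id of its trigger word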
        mt = os.path.getmtime(dir)
        if self.dir_mtime is not None and mt <= self.dir_mtime:
            return

        self.dir_mtime = mt
        self.ids_lookup.clear()
        self.word_embeddings.clear()

        tokenizer = model.cond_stage_model.tokenizer

        def const_hash(a):
            r = 0
            for v in a:
                r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
            return r

        def process_file(path, filename):
            name = os.path.splitext(filename)[0]

            data = torch.load(path)
            param_dict = data['string_to_param']
            assert len(param_dict) == 1, 'embedding file has multiple terms in it'
            emb = next(iter(param_dict.items()))[1].reshape(768)
            self.word_embeddings[name] = emb
            self.word_embeddings_checksums[name] = f'{const_hash(emb)&0xffff:04x}'

            ids = tokenizer([name], add_special_tokens=False)['input_ids'][0]

            first_id = ids[0]
            if first_id not in self.ids_lookup:
                self.ids_lookup[first_id] = []
            self.ids_lookup[first_id].append((ids, name))

        for fn in os.listdir(dir):
            try:
                process_file(os.path.join(dir, fn), fn)
            except:
                print(f"Error loading emedding {fn}:", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
                continue

        print(f"Loaded a total of {len(self.word_embeddings)} text inversion embeddings.")

    def hijack(self, m):
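        # replace the CLIP token-embedding layer and the conditioning stage so that
        # custom embeddings and attention-weight syntax are applied while encoding prompts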
        model_embeddings = m.cond_stage_model.transformer.text_model.embeddings

        model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
        m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)


class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
    def __init__(self, wrapped, embeddings):
        super().__init__()
        self.wrapped = wrapped
        self.embeddings = embeddings
        self.tokenizer = wrapped.tokenizer
        self.max_length = wrapped.max_length
        self.token_mults = {}
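        # build a table of attention multipliers for vocabulary tokens that contain
        # parentheses or brackets: each '(' multiplies emphasis by 1.1 and each '['
        # divides it by 1.1, so "((word))" weighs the prompt more heavily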

        tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
        for text, ident in tokens_with_parens:
            mult = 1.0
            for c in text:
                if c == '[':
                    mult /= 1.1
                if c == ']':
                    mult *= 1.1
                if c == '(':
                    mult *= 1.1
                if c == ')':
                    mult /= 1.1

            if mult != 1.0:
                self.token_mults[ident] = mult

    def forward(self, text):
        self.embeddings.fixes = []
        self.embeddings.used_custom_terms = []
        remade_batch_tokens = []
        id_start = self.wrapped.tokenizer.bos_token_id
        id_end = self.wrapped.tokenizer.eos_token_id
        maxlen = self.wrapped.max_length - 2

        cache = {}
        batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
        batch_multipliers = []
        for tokens in batch_tokens:
            tuple_tokens = tuple(tokens)

            if tuple_tokens in cache:
                remade_tokens, fixes, multipliers = cache[tuple_tokens]
            else:
                fixes = []
                remade_tokens = []
                multipliers = []
                mult = 1.0

                i = 0
                while i < len(tokens):
                    token = tokens[i]

                    possible_matches = self.embeddings.ids_lookup.get(token, None)

                    mult_change = self.token_mults.get(token)
                    if mult_change is not None:
                        mult *= mult_change
                    elif possible_matches is None:
                        remade_tokens.append(token)
                        multipliers.append(mult)
                    else:
                        found = False
                        for ids, word in possible_matches:
                            if tokens[i:i+len(ids)] == ids:
                                fixes.append((len(remade_tokens), word))
                                remade_tokens.append(777)  # arbitrary placeholder token id; replaced with the custom embedding by EmbeddingsWithFixes
                                multipliers.append(mult)
                                i += len(ids) - 1
                                found = True
                                self.embeddings.used_custom_terms.append((word, self.embeddings.word_embeddings_checksums[word]))
                                break

                        if not found:
                            remade_tokens.append(token)
                            multipliers.append(mult)

                    i += 1

                remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
                remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
                cache[tuple_tokens] = (remade_tokens, fixes, multipliers)

            multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
            multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]

            remade_batch_tokens.append(remade_tokens)
            self.embeddings.fixes.append(fixes)
            batch_multipliers.append(multipliers)

        tokens = torch.asarray(remade_batch_tokens).to(self.wrapped.device)
        outputs = self.wrapped.transformer(input_ids=tokens)
        z = outputs.last_hidden_state

        # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
        original_mean = z.mean()
        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
        new_mean = z.mean()
        z *= original_mean / new_mean

        return z


class EmbeddingsWithFixes(nn.Module):
    def __init__(self, wrapped, embeddings):
        super().__init__()
        self.wrapped = wrapped
        self.embeddings = embeddings

    def forward(self, input_ids):
        batch_fixes = self.embeddings.fixes
        self.embeddings.fixes = None

        inputs_embeds = self.wrapped(input_ids)

        if batch_fixes is not None:
            for fixes, tensor in zip(batch_fixes, inputs_embeds):
                for offset, word in fixes:
                    tensor[offset] = self.embeddings.word_embeddings[word]


        return inputs_embeds


def process_images(outpath, func_init, func_sample, prompt, seed, sampler_index, batch_size, n_iter, steps, cfg_scale, width, height, prompt_matrix, use_GFPGAN, do_not_save_grid=False, extra_generation_params=None):
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

    assert prompt is not None
    torch_gc()

    if seed == -1:
        seed = random.randrange(4294967294)
    seed = int(seed)

    os.makedirs(outpath, exist_ok=True)

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    comments = []

    prompt_matrix_parts = []
    if prompt_matrix:
        all_prompts = []
        prompt_matrix_parts = prompt.split("|")
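        # the first "|"-separated part is the base prompt; each remaining part maps
        # to one bit of combination_num, so every subset of the optional parts is generated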
        combination_count = 2 ** (len(prompt_matrix_parts) - 1)
        for combination_num in range(combination_count):
            selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1<<n)]

            if opts.prompt_matrix_add_to_start:
                selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
            else:
                selected_prompts = [prompt_matrix_parts[0]] + selected_prompts

            all_prompts.append(", ".join(selected_prompts))

        n_iter = math.ceil(len(all_prompts) / batch_size)
        all_seeds = len(all_prompts) * [seed]

        print(f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches.")
    else:

        if opts.verify_input:
            try:
                check_prompt_length(prompt, comments)
            except:
                print("Error verifying input:", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)

        all_prompts = batch_size * n_iter * [prompt]
        all_seeds = [seed + x for x in range(len(all_prompts))]

    generation_params = {
        "Steps": steps,
        "Sampler": samplers[sampler_index].name,
        "CFG scale": cfg_scale,
        "Seed": seed,
        "GFPGAN": ("GFPGAN" if use_GFPGAN and GFPGAN is not None else None)
    }

    if extra_generation_params is not None:
        generation_params.update(extra_generation_params)

    generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])

    def infotext():
        return f"{prompt}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])

    if os.path.exists(cmd_opts.embeddings_dir):
        model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, model)

    output_images = []
    with torch.no_grad(), autocast("cuda"), model.ema_scope():
        init_data = func_init()

        for n in range(n_iter):
            prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
            seeds = all_seeds[n * batch_size:(n + 1) * batch_size]

            uc = model.get_learned_conditioning(len(prompts) * [""])
            c = model.get_learned_conditioning(prompts)

            if len(model_hijack.used_custom_terms) > 0:
                comments.append("Used custom terms: " + ", ".join([f'{word} [{checksum}]' for word, checksum in model_hijack.used_custom_terms]))

            # we manually generate all input noises because each one should have a specific seed
            x = create_random_tensors([opt_C, height // opt_f, width // opt_f], seeds=seeds)

            samples_ddim = func_sample(init_data=init_data, x=x, conditioning=c, unconditional_conditioning=uc)

            x_samples_ddim = model.decode_first_stage(samples_ddim)
            x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

            if prompt_matrix or opts.samples_save or opts.grid_save:
                for i, x_sample in enumerate(x_samples_ddim):
                    x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                    x_sample = x_sample.astype(np.uint8)

                    if use_GFPGAN and GFPGAN is not None:
                        torch_gc()
                        cropped_faces, restored_faces, restored_img = GFPGAN.enhance(x_sample, has_aligned=False, only_center_face=False, paste_back=True)
                        x_sample = restored_img

                    image = Image.fromarray(x_sample)
                    save_image(image, sample_path, f"{base_count:05}", seeds[i], prompts[i], opts.samples_format, info=infotext())

                    output_images.append(image)
                    base_count += 1

        if (prompt_matrix or opts.grid_save) and not do_not_save_grid:
            if prompt_matrix:
                grid = image_grid(output_images, batch_size, force_n_rows=1 << ((len(prompt_matrix_parts)-1)//2))

                try:
                    grid = draw_prompt_matrix(grid, width, height, prompt_matrix_parts)
                except:
                    print("Error creating prompt_matrix text:", file=sys.stderr)
                    print(traceback.format_exc(), file=sys.stderr)

                output_images.insert(0, grid)
            else:
                grid = image_grid(output_images, batch_size)

            save_image(grid, outpath, f"grid-{grid_count:04}", seed, prompt, opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename)
            grid_count += 1

    torch_gc()
    return output_images, seed, infotext()


def txt2img(prompt: str, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix: bool, ddim_eta: float, n_iter: int, batch_size: int, cfg_scale: float, seed: int, height: int, width: int):
    outpath = opts.outdir or "outputs/txt2img-samples"

    sampler = samplers[sampler_index].constructor(model)

    def init():
        pass

    def sample(init_data, x, conditioning, unconditional_conditioning):
        samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=cfg_scale, unconditional_conditioning=unconditional_conditioning, eta=ddim_eta, x_T=x)
        return samples_ddim

    output_images, seed, info = process_images(
        outpath=outpath,
        func_init=init,
        func_sample=sample,
        prompt=prompt,
        seed=seed,
        sampler_index=sampler_index,
        batch_size=batch_size,
        n_iter=n_iter,
        steps=ddim_steps,
        cfg_scale=cfg_scale,
        width=width,
        height=height,
        prompt_matrix=prompt_matrix,
        use_GFPGAN=use_GFPGAN
    )

    del sampler

    return output_images, seed, plaintext_to_html(info)


class Flagging(gr.FlaggingCallback):

    def setup(self, components, flagging_dir: str):
        pass

    def flag(self, flag_data, flag_option=None, flag_index=None, username=None):
        import csv

        os.makedirs("log/images", exist_ok=True)

        # those must match the "txt2img" function
        prompt, ddim_steps, sampler_name, use_GFPGAN, prompt_matrix, ddim_eta, n_iter, n_samples, cfg_scale, request_seed, height, width, images, seed, comment = flag_data

        filenames = []

        with open("log/log.csv", "a", encoding="utf8", newline='') as file:
            import time
            import base64

            at_start = file.tell() == 0
            writer = csv.writer(file)
            if at_start:
                writer.writerow(["prompt", "seed", "width", "height", "cfgs", "steps", "filename"])

            filename_base = str(int(time.time() * 1000))
            for i, filedata in enumerate(images):
                filename = "log/images/"+filename_base + ("" if len(images) == 1 else "-"+str(i+1)) + ".png"

                if filedata.startswith("data:image/png;base64,"):
                    filedata = filedata[len("data:image/png;base64,"):]

                with open(filename, "wb") as imgfile:
                    imgfile.write(base64.decodebytes(filedata.encode('utf-8')))

                filenames.append(filename)

            writer.writerow([prompt, seed, width, height, cfg_scale, ddim_steps, filenames[0]])

        print("Logged:", filenames[0])


txt2img_interface = gr.Interface(
    wrap_gradio_call(txt2img),
    inputs=[
        gr.Textbox(label="Prompt", placeholder="A corgi wearing a top hat as an oil painting.", lines=1),
        gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
        gr.Radio(label='Sampling method', choices=[x.name for x in samplers], value=samplers[0].name, type="index"),
        gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
        gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="DDIM ETA", value=0.0, visible=False),
        gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
        gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
        gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
        gr.Number(label='Seed', value=-1),
        gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
        gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
    ],
    outputs=[
        gr.Gallery(label="Images"),
        gr.Number(label='Seed'),
        gr.HTML(),
    ],
    title="Stable Diffusion Text-to-Image",
    flagging_callback=Flagging()
)


def img2img(prompt: str, init_img, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix, loopback: bool, sd_upscale: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, height: int, width: int, resize_mode: int):
    outpath = opts.outdir or "outputs/img2img-samples"

    sampler = samplers_for_img2img[sampler_index].constructor(model)

    assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'

    def init():
        image = init_img.convert("RGB")
        image = resize_image(resize_mode, image, width, height)
        image = np.array(image).astype(np.float32) / 255.0
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image)

        init_image = 2. * image - 1.
        init_image = init_image.to(device)
        init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
        init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space

        return init_latent,

    def sample(init_data, x, conditioning, unconditional_conditioning):
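        # img2img with a k-diffusion sampler: noise the init latent up to the sigma
        # matching the requested denoising strength, then run only the tail of the
        # sigma schedule, so higher strength regenerates more of the image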
        t_enc = int(denoising_strength * ddim_steps)

        x0, = init_data

        sigmas = sampler.model_wrap.get_sigmas(ddim_steps)
        noise = x * sigmas[ddim_steps - t_enc - 1]

        xi = x0 + noise
        sigma_sched = sigmas[ddim_steps - t_enc - 1:]
        model_wrap_cfg = CFGDenoiser(sampler.model_wrap)
        samples_ddim = sampler.func(model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': cfg_scale}, disable=False)
        return samples_ddim

    if loopback:
        output_images, info = None, None
        history = []
        initial_seed = None

        for i in range(n_iter):
            output_images, seed, info = process_images(
                outpath=outpath,
                func_init=init,
                func_sample=sample,
                prompt=prompt,
                seed=seed,
                sampler_index=sampler_index,
                batch_size=1,
                n_iter=1,
                steps=ddim_steps,
                cfg_scale=cfg_scale,
                width=width,
                height=height,
                prompt_matrix=prompt_matrix,
                use_GFPGAN=use_GFPGAN,
                do_not_save_grid=True,
                extra_generation_params={"Denoising Strength": denoising_strength},
            )

            if initial_seed is None:
                initial_seed = seed

            init_img = output_images[0]
            seed = seed + 1
            denoising_strength = max(denoising_strength * 0.95, 0.1)
            history.append(init_img)

        grid_count = len(os.listdir(outpath)) - 1
        grid = image_grid(history, batch_size, force_n_rows=1)

        save_image(grid, outpath, f"grid-{grid_count:04}", initial_seed, prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename)

        output_images = history
        seed = initial_seed

    elif sd_upscale:
        initial_seed = None
        initial_info = None

        img = upscale_with_realesrgan(init_img, RealESRGAN_upscaling=2, RealESRGAN_model_index=0)

        torch_gc()

        grid = split_grid(img, tile_w=width, tile_h=height, overlap=opts.sd_upscale_overlap)


        print(f"SD upscaling will process a total of {len(grid.tiles[0][2])}x{len(grid.tiles)} images.")

        for y, h, row in grid.tiles:
            for tiledata in row:
                init_img = tiledata[2]

                output_images, seed, info = process_images(
                    outpath=outpath,
                    func_init=init,
                    func_sample=sample,
                    prompt=prompt,
                    seed=seed,
                    sampler_index=sampler_index,
                    batch_size=1,  # since process_images can't work with multiple different images we have to do this for now
                    n_iter=1,
                    steps=ddim_steps,
                    cfg_scale=cfg_scale,
                    width=width,
                    height=height,
                    prompt_matrix=prompt_matrix,
                    use_GFPGAN=use_GFPGAN,
                    do_not_save_grid=True,
                    extra_generation_params={"Denoising Strength": denoising_strength},
                )

                if initial_seed is None:
                    initial_seed = seed
                    initial_info = info

                seed += 1

                tiledata[2] = output_images[0]

        combined_image = combine_grid(grid)

        grid_count = len(os.listdir(outpath)) - 1
        save_image(combined_image, outpath, f"grid-{grid_count:04}", initial_seed, prompt, opts.grid_format, info=initial_info, short_filename=not opts.grid_extended_filename)

        output_images = [combined_image]
        seed = initial_seed
        info = initial_info

    else:
        output_images, seed, info = process_images(
            outpath=outpath,
            func_init=init,
            func_sample=sample,
            prompt=prompt,
            seed=seed,
            sampler_index=sampler_index,
            batch_size=batch_size,
            n_iter=n_iter,
            steps=ddim_steps,
            cfg_scale=cfg_scale,
            width=width,
            height=height,
            prompt_matrix=prompt_matrix,
            use_GFPGAN=use_GFPGAN,
            extra_generation_params={"Denoising Strength": denoising_strength},
        )

    del sampler

    return output_images, seed, plaintext_to_html(info)


sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None

img2img_interface = gr.Interface(
    wrap_gradio_call(img2img),
    inputs=[
        gr.Textbox(placeholder="A fantasy landscape, trending on artstation.", lines=1),
        gr.Image(value=sample_img2img, source="upload", interactive=True, type="pil"),
        gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
        gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index"),
        gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
        gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
        gr.Checkbox(label='Loopback (use images from previous batch when creating next batch)', value=False),
        gr.Checkbox(label='Stable Diffusion upscale', value=False),
        gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
        gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
        gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising Strength', value=0.75),
        gr.Number(label='Seed', value=-1),
        gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
        gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
        gr.Radio(label="Resize mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
    ],
    outputs=[
        gr.Gallery(),
        gr.Number(label='Seed'),
        gr.HTML(),
    ],
    allow_flagging="never",
)


def upscale_with_realesrgan(image, RealESRGAN_upscaling, RealESRGAN_model_index):
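    # instantiate the selected Real-ESRGAN network; model_path is a URL here,
    # which RealESRGANer downloads and caches on first use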
    info = realesrgan_models[RealESRGAN_model_index]

    model = info.model()
    upsampler = RealESRGANer(
        scale=info.netscale,
        model_path=info.location,
        model=model,
        half=True
    )

    upsampled = upsampler.enhance(np.array(image), outscale=RealESRGAN_upscaling)[0]

    image = Image.fromarray(upsampled)
    return image


def run_extras(image, GFPGAN_strength, RealESRGAN_upscaling, RealESRGAN_model_index):
    torch_gc()

    image = image.convert("RGB")

    outpath = opts.outdir or "outputs/extras-samples"

    if GFPGAN is not None and GFPGAN_strength > 0:
        cropped_faces, restored_faces, restored_img = GFPGAN.enhance(np.array(image, dtype=np.uint8), has_aligned=False, only_center_face=False, paste_back=True)
        res = Image.fromarray(restored_img)

        if GFPGAN_strength < 1.0:
            res = Image.blend(image, res, GFPGAN_strength)

        image = res

    if have_realesrgan and RealESRGAN_upscaling != 1.0:
        image = upscale_with_realesrgan(image, RealESRGAN_upscaling, RealESRGAN_model_index)

    os.makedirs(outpath, exist_ok=True)
    base_count = len(os.listdir(outpath))

    save_image(image, outpath, f"{base_count:05}", None, '', opts.samples_format, short_filename=True)

    return image, 0, ''


extras_interface = gr.Interface(
    wrap_gradio_call(run_extras),
    inputs=[
        gr.Image(label="Source", source="upload", interactive=True, type="pil"),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN strength", value=1, interactive=GFPGAN is not None),
        gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Real-ESRGAN upscaling", value=2, interactive=have_realesrgan),
        gr.Radio(label='Real-ESRGAN model', choices=[x.name for x in realesrgan_models], value=realesrgan_models[0].name, type="index", interactive=have_realesrgan),
    ],
    outputs=[
        gr.Image(label="Result"),
        gr.Number(label='Seed', visible=False),
        gr.HTML(),
    ],
    allow_flagging="never",
)

opts = Options()
if os.path.exists(config_filename):
    opts.load(config_filename)


def run_settings(*args):
    up = []

    for key, value, comp in zip(opts.data_labels.keys(), args, settings_interface.input_components):
        opts.data[key] = value
        up.append(comp.update(value=value))

    opts.save(config_filename)

    return 'Settings saved.', ''


def create_setting_component(key):
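    # each data_labels entry is (default, label[, min, max[, step]]): a 5-tuple
    # becomes a slider with an explicit step, a 4-tuple a slider with step 1, and
    # a plain (default, label) pair falls back to a component chosen by type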
    def fun():
        return opts.data[key] if key in opts.data else opts.data_labels[key][0]

    labelinfo = opts.data_labels[key]
    t = type(labelinfo[0])
    label = labelinfo[1]
    if t == str:
        item = gr.Textbox(label=label, value=fun, lines=1)
    elif t == int:
        if len(labelinfo) == 5:
            item = gr.Slider(minimum=labelinfo[2], maximum=labelinfo[3], step=labelinfo[4], label=label, value=fun)
        elif len(labelinfo) == 4:
            item = gr.Slider(minimum=labelinfo[2], maximum=labelinfo[3], step=1, label=label, value=fun)
        else:
            item = gr.Number(label=label, value=fun)
    elif t == bool:
        item = gr.Checkbox(label=label, value=fun)
    else:
        raise Exception(f'bad options item type: {str(t)} for key {key}')

    return item


settings_interface = gr.Interface(
    run_settings,
    inputs=[create_setting_component(key) for key in opts.data_labels.keys()],
    outputs=[
        gr.Textbox(label='Result'),
        gr.HTML(),
    ],
    title=None,
    description=None,
    allow_flagging="never",
)

interfaces = [
    (txt2img_interface, "txt2img"),
    (img2img_interface, "img2img"),
    (extras_interface, "Extras"),
    (settings_interface, "Settings"),
]

config = OmegaConf.load(cmd_opts.config)
model = load_model_from_config(config, cmd_opts.ckpt)

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = (model if cmd_opts.no_half else model.half()).to(device)

model_hijack = StableDiffusionModelHijack()
model_hijack.hijack(model)

demo = gr.TabbedInterface(
    interface_list=[x[0] for x in interfaces],
    tab_names=[x[1] for x in interfaces],
    css=("" if cmd_opts.no_progressbar_hiding else css_hide_progressbar) + """
.output-html p {margin: 0 0.5em;}
.performance { font-size: 0.85em; color: #444; }
"""
)

demo.launch()