# preprocess.py
import os
from PIL import Image, ImageOps
import math
import platform
import sys
import tqdm
import time

from modules import shared, images, deepbooru
from modules.paths import models_path
from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop


def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
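    """Load the requested captioning models (BLIP interrogator and/or deepbooru), run preprocess_work, and release the models again afterwards."""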
    try:
        if process_caption:
            shared.interrogator.load()

        if process_caption_deepbooru:
            deepbooru.model.start()

        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)

    finally:

        if process_caption:
            shared.interrogator.send_blip_to_ram()

        if process_caption_deepbooru:
            deepbooru.model.stop()


def listfiles(dirname):
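    """Return every entry in dirname; non-image entries are simply skipped later when they fail to open as images."""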
    return os.listdir(dirname)


class PreprocessParams:
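    """Per-image settings and state shared by save_pic and save_pic_with_caption."""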
    src = None
    dstdir = None
    subindex = 0
    flip = False
    process_caption = False
    process_caption_deepbooru = False
    preprocess_txt_action = None


def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
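    """Caption the image if requested, save it as a PNG in params.dstdir, and write the merged caption to a matching .txt file."""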
    caption = ""

    if params.process_caption:
        caption += shared.interrogator.generate_caption(image)

    if params.process_caption_deepbooru:
        if len(caption) > 0:
            caption += ", "
        caption += deepbooru.model.tag_multi(image)

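    # Output files are named <index>-<subindex>-<original filename>.png, with the caption in a matching .txt.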
    filename_part = params.src
    filename_part = os.path.splitext(filename_part)[0]
    filename_part = os.path.basename(filename_part)

    basename = f"{index:05}-{params.subindex}-{filename_part}"
    image.save(os.path.join(params.dstdir, f"{basename}.png"))

    if params.preprocess_txt_action == 'prepend' and existing_caption:
        caption = existing_caption + ' ' + caption
    elif params.preprocess_txt_action == 'append' and existing_caption:
        caption = caption + ' ' + existing_caption
    elif params.preprocess_txt_action == 'copy' and existing_caption:
        caption = existing_caption

    caption = caption.strip()

    if len(caption) > 0:
        with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
            file.write(caption)

    params.subindex += 1


def save_pic(image, index, params, existing_caption=None):
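    """Save the image, and also a mirrored copy when flipping is enabled."""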
    save_pic_with_caption(image, index, params, existing_caption=existing_caption)

    if params.flip:
        save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)


def split_pic(image, inverse_xy, width, height, overlap_ratio):
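    """Scale the image so its short side matches the target, then yield overlapping crops covering the long side."""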
    if inverse_xy:
        from_w, from_h = image.height, image.width
        to_w, to_h = height, width
    else:
        from_w, from_h = image.width, image.height
        to_w, to_h = width, height
    h = from_h * to_w // from_w
    if inverse_xy:
        image = image.resize((h, to_w))
    else:
        image = image.resize((to_w, h))

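    # Use just enough crops that consecutive tiles overlap by at least overlap_ratio, spaced evenly along the long side.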
    split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
    y_step = (h - to_h) / (split_count - 1)
    for i in range(split_count):
        y = int(y_step * i)
        if inverse_xy:
            splitted = image.crop((y, 0, y + to_h, to_w))
        else:
            splitted = image.crop((0, y, to_w, y + to_h))
        yield splitted


def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
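    """Walk the source directory and write resized (and optionally split, focal-cropped, flipped and captioned) copies into the destination directory."""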
    width = process_width
    height = process_height
    src = os.path.abspath(process_src)
    dst = os.path.abspath(process_dst)
    split_threshold = max(0.0, min(1.0, split_threshold))
    overlap_ratio = max(0.0, min(0.9, overlap_ratio))

    assert src != dst, 'same directory specified as source and destination'

    os.makedirs(dst, exist_ok=True)

    files = listfiles(src)

    shared.state.job = "preprocess"
    shared.state.textinfo = "Preprocessing..."
    shared.state.job_count = len(files)

    params = PreprocessParams()
    params.dstdir = dst
    params.flip = process_flip
    params.process_caption = process_caption
    params.process_caption_deepbooru = process_caption_deepbooru
    params.preprocess_txt_action = preprocess_txt_action

    for index, imagefile in enumerate(tqdm.tqdm(files)):
        params.subindex = 0
        filename = os.path.join(src, imagefile)
        try:
            img = Image.open(filename).convert("RGB")
        except Exception:
            continue

        params.src = filename

        existing_caption = None
        existing_caption_filename = os.path.splitext(filename)[0] + '.txt'
        if os.path.exists(existing_caption_filename):
            with open(existing_caption_filename, 'r', encoding="utf8") as file:
                existing_caption = file.read()

        if shared.state.interrupted:
            break

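        # ratio compares the image's elongation to the target's (values well below 1 mean the image is much more
        # elongated than the target frame); inverse_xy flags landscape images so split_pic slices horizontally.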
        if img.height > img.width:
            ratio = (img.width * height) / (img.height * width)
            inverse_xy = False
        else:
            ratio = (img.height * width) / (img.width * height)
            inverse_xy = True

        process_default_resize = True

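        # Split images that are far more elongated than the target into overlapping tiles instead of a single resize.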
        if process_split and ratio < 1.0 and ratio <= split_threshold:
            for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
                save_pic(splitted, index, params, existing_caption=existing_caption)
            process_default_resize = False

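        # Focal crop: pick crops centered on detected faces, high-entropy regions and strong edges, combined using the supplied weights.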
        if process_focal_crop and img.height != img.width:

            dnn_model_path = None
            try:
                dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv"))
            except Exception as e:
                print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)

            autocrop_settings = autocrop.Settings(
                crop_width = width,
                crop_height = height,
                face_points_weight = process_focal_crop_face_weight,
                entropy_points_weight = process_focal_crop_entropy_weight,
                corner_points_weight = process_focal_crop_edges_weight,
                annotate_image = process_focal_crop_debug,
                dnn_model_path = dnn_model_path,
            )
            for focal in autocrop.crop_image(img, autocrop_settings):
                save_pic(focal, index, params, existing_caption=existing_caption)
            process_default_resize = False

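        # Fall back to a single resize to the target dimensions when the image was neither split nor focal-cropped.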
        if process_default_resize:
            img = images.resize_image(1, img, width, height)
            save_pic(img, index, params, existing_caption=existing_caption)

        shared.state.nextjob()
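

# Usage sketch (illustrative values, not from this file): preprocess() is presumably invoked from the
# web UI's image preprocessing tab with the source/destination directories, the target size and the
# caption handling mode, roughly like:
#   preprocess("/path/to/raw", "/path/to/processed", 512, 512, "ignore", True, False, False)
# Any preprocess_txt_action other than 'prepend', 'append' or 'copy' keeps only the freshly generated caption.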