Unverified Commit 47b298d5 authored by w-e-w's avatar w-e-w Committed by GitHub
Browse files

Merge branch 'AUTOMATIC1111:master' into master

parents dd20fc0f ea9bd9fc
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -66,8 +66,8 @@ titles = {


    "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
    "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",


    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
    "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",


    "Loopback": "Process an image, use it as an input, repeat.",
    "Loopback": "Process an image, use it as an input, repeat.",
+5 −3
Original line number Original line Diff line number Diff line
@@ -16,6 +16,7 @@ from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
from fonts.ttf import Roboto
import string
import string
import json
import json
import hashlib


from modules import sd_samplers, shared, script_callbacks
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
from modules.shared import opts, cmd_opts
@@ -198,7 +199,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):


    pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
    pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2


    result = Image.new("RGB", (im.width + pad_left + margin * (rows-1), im.height + pad_top + margin * (cols-1)), "white")
    result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white")


    for row in range(rows):
    for row in range(rows):
        for col in range(cols):
        for col in range(cols):
@@ -222,7 +223,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
    return result
    return result




def draw_prompt_matrix(im, width, height, all_prompts):
def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
    prompts = all_prompts[1:]
    prompts = all_prompts[1:]
    boundary = math.ceil(len(prompts) / 2)
    boundary = math.ceil(len(prompts) / 2)


@@ -232,7 +233,7 @@ def draw_prompt_matrix(im, width, height, all_prompts):
    hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
    hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
    ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
    ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]


    return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
    return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)




def resize_image(resize_mode, im, width, height, upscaler_name=None):
def resize_image(resize_mode, im, width, height, upscaler_name=None):
@@ -343,6 +344,7 @@ class FilenameGenerator:
        'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
        'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
        'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
        'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
        'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
        'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
        'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
        'prompt': lambda self: sanitize_filename_part(self.prompt),
        'prompt': lambda self: sanitize_filename_part(self.prompt),
        'prompt_no_styles': lambda self: self.prompt_no_style(),
        'prompt_no_styles': lambda self: self.prompt_no_style(),
        'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
        'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
+3 −0
Original line number Original line Diff line number Diff line
@@ -45,6 +45,9 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
                    full_path = file
                    full_path = file
                    if os.path.isdir(full_path):
                    if os.path.isdir(full_path):
                        continue
                        continue
                    if os.path.islink(full_path) and not os.path.exists(full_path):
                        print(f"Skipping broken symlink: {full_path}")
                        continue
                    if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
                    if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
                        continue
                        continue
                    if len(ext_filter) != 0:
                    if len(ext_filter) != 0:
+10 −7
Original line number Original line Diff line number Diff line
@@ -20,8 +20,9 @@ class DisableInitialization:
    ```
    ```
    """
    """


    def __init__(self):
    def __init__(self, disable_clip=True):
        self.replaced = []
        self.replaced = []
        self.disable_clip = disable_clip


    def replace(self, obj, field, func):
    def replace(self, obj, field, func):
        original = getattr(obj, field, None)
        original = getattr(obj, field, None)
@@ -75,6 +76,8 @@ class DisableInitialization:
        self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
        self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
        self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
        self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
        self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)
        self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)

        if self.disable_clip:
            self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
            self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
            self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
            self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
            self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
            self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
+5 −1
Original line number Original line Diff line number Diff line
@@ -354,6 +354,9 @@ def repair_config(sd_config):
        sd_config.model.params.unet_config.params.use_fp16 = True
        sd_config.model.params.unet_config.params.use_fp16 = True




sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'

def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
    from modules import lowvram, sd_hijack
    from modules import lowvram, sd_hijack
    checkpoint_info = checkpoint_info or select_checkpoint()
    checkpoint_info = checkpoint_info or select_checkpoint()
@@ -374,6 +377,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)


    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
    clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict


    timer.record("find config")
    timer.record("find config")


@@ -386,7 +390,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_


    sd_model = None
    sd_model = None
    try:
    try:
        with sd_disable_initialization.DisableInitialization():
        with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
            sd_model = instantiate_from_config(sd_config.model)
            sd_model = instantiate_from_config(sd_config.model)
    except Exception as e:
    except Exception as e:
        pass
        pass
Loading