Commit 924e2220 authored by AUTOMATIC

add option to show/hide warnings

removed hiding warnings from LDSR
fixed/reworked a few places that produced warnings
parent 889b851a
extensions-builtin/LDSR/ldsr_model_arch.py +0 −3
@@ -1,7 +1,6 @@
 import os
 import gc
 import time
-import warnings

 import numpy as np
 import torch
@@ -15,8 +14,6 @@ from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.util import instantiate_from_config, ismap
 from modules import shared, sd_hijack

-warnings.filterwarnings("ignore", category=UserWarning)
-
 cached_ldsr_model: torch.nn.Module = None

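For context on why the filter was deleted rather than kept: warnings.filterwarnings() mutates the interpreter-wide filter list, so running it at import time in the LDSR extension silenced UserWarning for the whole program, not just for LDSR. If a single noisy call ever needs suppression, a scoped sketch would look like the following (the helper name is hypothetical, not part of this commit):

import warnings

def run_quietly(fn, *args, **kwargs):
    # hypothetical helper: suppress UserWarning for this one call only,
    # restoring the global warning-filter state afterwards
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        return fn(*args, **kwargs)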
javascript/localization.js +1 −1
@@ -11,7 +11,7 @@ ignore_ids_for_localization={
     train_embedding: 'OPTION',
     train_hypernetwork: 'OPTION',
     txt2img_styles: 'OPTION',
-    img2img_styles 'OPTION',
+    img2img_styles: 'OPTION',
     setting_random_artist_categories: 'SPAN',
     setting_face_restoration_model: 'SPAN',
     setting_realesrgan_enabled_models: 'SPAN',
modules/hypernetworks/hypernetwork.py +6 −1
@@ -12,7 +12,7 @@ import torch
 import tqdm
 from einops import rearrange, repeat
 from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers, hashes
+from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
 from modules.textual_inversion import textual_inversion, logging
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 from torch import einsum
@@ -575,6 +575,8 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi

     pbar = tqdm.tqdm(total=steps - initial_step)
     try:
+        sd_hijack_checkpoint.add()
+
         for i in range((steps-initial_step) * gradient_step):
             if scheduler.finished:
                 break
@@ -724,6 +726,9 @@ Last saved image: {html.escape(last_saved_image)}<br/>
         pbar.close()
         hypernetwork.eval()
         #report_statistics(loss_dict)
+        sd_hijack_checkpoint.remove()
+
+

     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
     hypernetwork.optimizer_name = optimizer_name
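The two hunks above follow a strict pairing: sd_hijack_checkpoint.add() is the first statement inside the try, and sd_hijack_checkpoint.remove() sits in the cleanup path, so the patched forward methods are restored even when training raises. A minimal sketch of the idiom (assuming, as the surrounding context suggests, that the cleanup lines live in this try's finally block):

from modules import sd_hijack_checkpoint  # import added in the hunk above

def train_with_checkpointing(train_loop):
    try:
        sd_hijack_checkpoint.add()      # swap in gradient-checkpointed forwards
        train_loop()                    # any exception still reaches cleanup
    finally:
        sd_hijack_checkpoint.remove()   # put the original forwards back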
modules/sd_hijack.py +0 −8
@@ -69,12 +69,6 @@ def undo_optimizations():
     ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward


-def fix_checkpoint():
-    ldm.modules.attention.BasicTransformerBlock.forward = sd_hijack_checkpoint.BasicTransformerBlock_forward
-    ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = sd_hijack_checkpoint.ResBlock_forward
-    ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = sd_hijack_checkpoint.AttentionBlock_forward
-
-
 class StableDiffusionModelHijack:
     fixes = None
     comments = []
@@ -107,8 +101,6 @@ class StableDiffusionModelHijack:

         self.clip = m.cond_stage_model

-        fix_checkpoint()
-
         def flatten(el):
             flattened = [flatten(children) for children in el.children()]
             res = [el]
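Deleting fix_checkpoint() removes an unconditional, irreversible monkey-patch: it overwrote the ldm forward methods on every model load with no record of the originals. The replacement, sd_hijack_checkpoint.add()/remove() in the next file, keeps the originals so the patch can be undone. A toy version of that save-and-restore patching pattern, just to illustrate the shape of the change (not from the commit):

_originals = {}

def patch(cls, name, replacement):
    _originals[(cls, name)] = getattr(cls, name)     # remember the original
    setattr(cls, name, replacement)

def unpatch(cls, name):
    setattr(cls, name, _originals.pop((cls, name)))  # restore and forget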
modules/sd_hijack_checkpoint.py +37 −1 (resulting file shown below)
from torch.utils.checkpoint import checkpoint

import ldm.modules.attention
import ldm.modules.diffusionmodules.openaimodel


def BasicTransformerBlock_forward(self, x, context=None):
    return checkpoint(self._forward, x, context)


def AttentionBlock_forward(self, x):
    return checkpoint(self._forward, x)


def ResBlock_forward(self, x, emb):
    return checkpoint(self._forward, x, emb)


stored = []


def add():
    if len(stored) != 0:
        return

    stored.extend([
        ldm.modules.attention.BasicTransformerBlock.forward,
        ldm.modules.diffusionmodules.openaimodel.ResBlock.forward,
        ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward
    ])

    ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward
    ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward
    ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward


def remove():
    if len(stored) == 0:
        return

    ldm.modules.attention.BasicTransformerBlock.forward = stored[0]
    ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = stored[1]
    ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = stored[2]

    stored.clear()
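All three wrappers route through torch.utils.checkpoint.checkpoint, which drops intermediate activations in the forward pass and recomputes them during backward, trading extra compute for a lower peak-memory footprint during training. A self-contained illustration (not from the commit) of the call being wrapped:

import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU())
x = torch.randn(8, 64, requires_grad=True)

# activations inside `layer` are not stored; they are recomputed on backward
y = checkpoint(layer, x)
y.sum().backward()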