Commit 028d3f64 authored by AUTOMATIC

ruff auto fixes

parent e42de4b8
+2 −2
@@ -288,5 +288,5 @@ class VQModelInterface(VQModel):
         dec = self.decoder(quant)
         return dec
 
-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
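
This pair of changes is ruff's B010 auto-fix (from the flake8-bugbear rules): setattr called with a constant attribute name is rewritten as a plain attribute assignment, which behaves identically but reads better and is visible to static analysis. The same rewrite recurs in the ddpm and Lora hunks below. A minimal sketch of the equivalence on a throwaway module (the module name here is hypothetical):

import types

mod = types.ModuleType("example")  # stand-in for ldm.models.autoencoder

# Before: attribute name passed as a string literal.
setattr(mod, "VQModel", object)

# After: direct assignment, same effect.
mod.VQModel = object

assert mod.VQModel is object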
+6 −6
@@ -1116,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
 
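Here ruff's C417 fix (flake8-comprehensions) replaces list(map(lambda ...)) with an equivalent list comprehension, avoiding a lambda call per element; the identical rewrite appears again in the @@ -1215 hunk below. A small sketch with stand-in data in place of the conditioning tensors:

batch_size = 2
tensors = [[1, 2, 3, 4], [5, 6, 7, 8]]  # hypothetical stand-ins for cond[key]

before = list(map(lambda x: x[:batch_size], tensors))
after = [x[:batch_size] for x in tensors]

assert before == after == [[1, 2], [5, 6]]
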
@@ -1215,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,
@@ -1437,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
         logs['bbox_image'] = cond_img
         return logs
 
-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
+6 −6
@@ -172,7 +172,7 @@ def load_lora(name, filename):
         else:
             print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
             continue
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
 
         with torch.no_grad():
             module.weight.copy_(weight)
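
Swapping assert False for an explicit raise AssertionError is ruff's B011 fix: assert statements are stripped entirely when Python runs with the -O flag, so the explicit raise fails reliably under any interpreter options. (In this hunk the rewritten line sits after a continue and is unreachable either way; the auto-fixer rewrites it regardless.) A sketch of the difference:

def fail_old():
    assert False, "unsupported layer type"  # vanishes under `python -O`

def fail_new():
    raise AssertionError("unsupported layer type")  # always raises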
@@ -184,7 +184,7 @@ def load_lora(name, filename):
         elif lora_key == "lora_down.weight":
             lora_module.down = module
         else:
-            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+            raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
 
     if len(keys_failed_to_match) > 0:
         print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
@@ -202,7 +202,7 @@ def load_loras(names, multipliers=None):
     loaded_loras.clear()
 
     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
-    if any([x is None for x in loras_on_disk]):
+    if any(x is None for x in loras_on_disk):
         list_available_loras()
 
         loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
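
The any() change is ruff's C419 fix: any([...]) materializes the whole list of booleans before testing it, while a bare generator expression lets any() short-circuit at the first hit. A sketch with hypothetical alias-lookup results:

loras_on_disk = ["a", None, "b"]  # hypothetical lookup results

missing_old = any([x is None for x in loras_on_disk])  # builds the full list first
missing_new = any(x is None for x in loras_on_disk)    # stops at the first None

assert missing_old and missing_new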
@@ -309,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
 
             print(f'failed to calculate lora weights for layer {lora_layer_name}')
 
-        setattr(self, "lora_current_names", wanted_names)
+        self.lora_current_names = wanted_names
 
 
 def lora_forward(module, input, original_forward):
@@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward):
 
 
 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
-    setattr(self, "lora_current_names", ())
-    setattr(self, "lora_weights_backup", None)
+    self.lora_current_names = ()
+    self.lora_weights_backup = None
 
 
 def lora_Linear_forward(self, input):
+1 −1
@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
 
 
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
 }))
 
 
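Here a comprehension that merely copies its iterable is collapsed to a list() call; this looks like ruff's C416 fix (unnecessary comprehension). Assuming lora.available_loras is a dict, as iterating it here suggests, both forms yield the same list of keys. Sketch with a stand-in dict:

available_loras = {"style_a": 1, "style_b": 2}  # stand-in for lora.available_loras

choices_old = ["None"] + [x for x in available_loras]  # copies keys one by one
choices_new = ["None"] + list(available_loras)         # same keys, directly

assert choices_old == choices_new == ["None", "style_a", "style_b"]
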
+1 −1
@@ -35,7 +35,7 @@ def list_config_states():
                 j["filepath"] = path
                 config_states.append(j)
 
-    config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
+    config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
 
     for cs in config_states:
         timestamp = time.asctime(time.gmtime(cs["created_at"]))
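
Finally, ruff's C413 fix: sorted() already returns a new list, so wrapping it in list() only adds a redundant copy. Sketch with mock config-state records:

config_states = [{"created_at": 10}, {"created_at": 30}, {"created_at": 20}]

old = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
new = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)  # same result, one copy fewer

assert old == new
assert [cs["created_at"] for cs in new] == [30, 20, 10]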