Commit 1bfec873 authored by AUTOMATIC

add an experimental option to apply loras to outputs rather than inputs

parent 48a15821
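
For context (commentary added here, not part of the commit): lora_forward adds a low-rank correction, up(down(x)), scaled by the lora multiplier and by alpha divided by the rank, on top of the wrapped layer's result. This commit adds an opt-in mode that feeds the layer's output through that correction instead of its input. Below is a minimal, self-contained PyTorch sketch of the two modes; the names dim, rank, base, down, up and scale are hypothetical, not from the webui code:

import torch
import torch.nn as nn

dim, rank = 320, 4                       # hypothetical sizes
base = nn.Linear(dim, dim)               # square layer: input and output shapes match
down = nn.Linear(dim, rank, bias=False)  # LoRA down-projection to the low rank
up = nn.Linear(rank, dim, bias=False)    # LoRA up-projection back to full width
multiplier, alpha = 1.0, 4.0
scale = multiplier * (alpha / up.weight.shape[1])  # alpha / rank, as in the diff below

x = torch.randn(2, dim)
res = base(x)
delta_from_input = up(down(x)) * scale     # default behaviour
delta_from_output = up(down(res)) * scale  # with lora_apply_to_outputs enabled
print((res + delta_from_input).shape, (res + delta_from_output).shape)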
extensions-builtin/Lora/lora.py  +4 −1
@@ -166,6 +166,9 @@ def lora_forward(module, input, res):
     for lora in loaded_loras:
         module = lora.modules.get(lora_layer_name, None)
         if module is not None:
-            res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+            if shared.opts.lora_apply_to_outputs and res.shape == input.shape:
+                res = res + module.up(module.down(res)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+            else:
+                res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)

     return res
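
Note the res.shape == input.shape guard above: the down-projection was trained against the layer's input width, so the output can only be routed through it when the layer's input and output dimensions coincide (e.g. square projection layers); for any other layer the code silently falls back to the usual input-side application.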
extensions-builtin/Lora/scripts/lora_script.py  +6 −1
@@ -3,7 +3,7 @@ import torch
 import lora
 import extra_networks_lora
 import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared


 def unload():
@@ -28,3 +28,8 @@ torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
 script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
 script_callbacks.on_script_unloaded(unload)
 script_callbacks.on_before_ui(before_ui)
+
+
+shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+    "lora_apply_to_outputs": shared.OptionInfo(False, "Apply Lora to outputs rather than inputs when possible (experimental)"),
+}))
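
Once registered, the checkbox appears in the webui Settings under "Extra Networks", and the value is read back on every call as shared.opts.lora_apply_to_outputs (the flag checked in lora_forward above), so toggling it should take effect without reloading the model. A one-line illustration, with the getattr default being a defensive assumption rather than webui convention:

from modules import shared

apply_to_outputs = getattr(shared.opts, "lora_apply_to_outputs", False)  # False if the Lora extension's option is absent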