Commit 63781563 authored by Tim Patton

Generalize SD torch load/save to implement safetensor merging compat

parent ac7ecd2d
+8 −7
@@ -249,7 +249,7 @@ def run_pnginfo(image):
    return '', geninfo, info


-def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, custom_name):
+def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, save_as_safetensors, custom_name):
    def weighted_sum(theta0, theta1, alpha):
        return ((1 - alpha) * theta0) + (alpha * theta1)

@@ -264,16 +264,16 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam
    teritary_model_info = sd_models.checkpoints_list.get(teritary_model_name, None)

    print(f"Loading {primary_model_info.filename}...")
-    primary_model = torch.load(primary_model_info.filename, map_location='cpu')
+    primary_model = sd_models.torch_load(primary_model_info.filename, primary_model_info, map_override='cpu')
    theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)

    print(f"Loading {secondary_model_info.filename}...")
-    secondary_model = torch.load(secondary_model_info.filename, map_location='cpu')
+    secondary_model = sd_models.torch_load(secondary_model_info.filename, secondary_model_info, map_override='cpu')
    theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model)

    if teritary_model_info is not None:
        print(f"Loading {teritary_model_info.filename}...")
-        teritary_model = torch.load(teritary_model_info.filename, map_location='cpu')
+        teritary_model = sd_models.torch_load(teritary_model_info.filename, teritary_model_info, map_override='cpu')
        theta_2 = sd_models.get_state_dict_from_checkpoint(teritary_model)
    else:
        teritary_model = None
@@ -314,12 +314,13 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam

    ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path

-    filename = primary_model_info.model_name + '_' + str(round(1-multiplier, 2)) + '-' + secondary_model_info.model_name + '_' + str(round(multiplier, 2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt'
-    filename = filename if custom_name == '' else (custom_name + '.ckpt')
+    output_exttype = '.safetensors' if save_as_safetensors else '.ckpt'
+    filename = primary_model_info.model_name + '_' + str(round(1-multiplier, 2)) + '-' + secondary_model_info.model_name + '_' + str(round(multiplier, 2)) + '-' + interp_method.replace(" ", "_") + '-merged' + output_exttype
+    filename = filename if custom_name == '' else (custom_name + output_exttype)
    output_modelname = os.path.join(ckpt_dir, filename)

    print(f"Saving to {output_modelname}...")
-    torch.save(primary_model, output_modelname)
+    sd_models.torch_save(primary_model, output_modelname)

    sd_models.list_models()
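
As a worked example of the new naming scheme: merging two hypothetical checkpoints sd-v1-5 (primary) and wd-v1-3 (secondary) with multiplier 0.3, "Weighted sum" interpolation, no custom name, and the safetensors box checked produces

    sd-v1-5_0.7-wd-v1-3_0.3-Weighted_sum-merged.safetensors

since the primary model's weight is round(1 - 0.3, 2) = 0.7 and spaces in the method name become underscores.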

+18 −7
@@ -4,7 +4,7 @@ import sys
import gc
from collections import namedtuple
import torch
-from safetensors.torch import load_file
+from safetensors.torch import load_file, save_file
import re
from omegaconf import OmegaConf

@@ -143,6 +143,22 @@ def transform_checkpoint_dict_key(k):

    return k

+def torch_load(model_filename, model_info, map_override=None):
+    map_override = shared.weight_load_location if not map_override else map_override
+    if checkpoint_types[model_info.exttype] == 'safetensors':
+        # safely load weights without unpickling arbitrary code
+        # TODO: safetensors supports zero copy fast load to gpu, see issue #684
+        return load_file(model_filename, device=map_override)
+    else:
+        return torch.load(model_filename, map_location=map_override)
+
+def torch_save(model, output_filename):
+    basename, exttype = os.path.splitext(output_filename)
+    if checkpoint_types[exttype] == 'safetensors':
+        # save as safetensors; the metadata entry marks the tensors as PyTorch-format
+        save_file(model, output_filename, metadata={"format": "pt"})
+    else:
+        torch.save(model, output_filename)

def get_state_dict_from_checkpoint(pl_sd):
    if "state_dict" in pl_sd:
@@ -175,12 +191,7 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
        # load from file
        print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")

-        if(checkpoint_types[checkpoint_info.exttype] == 'safetensors'):
-            # safely load weights
-            # TODO: safetensors supports zero copy fast load to gpu, see issue #684
-            pl_sd = load_file(checkpoint_file, device=shared.weight_load_location)
-        else:
-            pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
+        pl_sd = torch_load(checkpoint_file, checkpoint_info)

        if "global_step" in pl_sd:
            print(f"Global Step: {pl_sd['global_step']}")
+1814 −1812
@@ -1187,6 +1187,7 @@ def create_ui(wrap_gradio_gpu_call):
                interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3)
                interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method")
                save_as_half = gr.Checkbox(value=False, label="Save as float16")
+                save_as_safetensors = gr.Checkbox(value=False, label="Save as safetensors format")
                modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')

            with gr.Column(variant='panel'):
@@ -1699,6 +1700,7 @@ def create_ui(wrap_gradio_gpu_call):
                interp_method,
                interp_amount,
                save_as_half,
+                save_as_safetensors,
                custom_name,
            ],
            outputs=[
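
Gradio passes component values to the click handler positionally, so the new checkbox sits after save_as_half in this inputs list to mirror its position in the run_modelmerger signature from the first file. The correspondence for the components visible in this hunk (interp_amount feeds the multiplier parameter):

    # inputs component     -> run_modelmerger parameter
    # interp_method        -> interp_method
    # interp_amount        -> multiplier
    # save_as_half         -> save_as_half
    # save_as_safetensors  -> save_as_safetensors
    # custom_name          -> custom_name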