Unverified commit 88e01b23, authored by AUTOMATIC1111 and committed by GitHub

Merge pull request #6372 from timntorres/save-ti-hypernet-settings-to-txt-revised

Save hypernet and textual inversion settings to text file, revised.
Parents: 143ed5a4, b6bab2f0
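
For reference, each training run appends an entry like the following (abridged) to settings.txt in the run's log directory. The values here are illustrative, not taken from a real run; log_directory itself is skipped, and the preview_* fields after preview_from_txt2img appear only when it is enabled:

    datetime : 2023-01-06 12:00:00
    model_name: v1-5-pruned-emaonly
    model_hash: [81761151]
    initial_step: 0
    num_of_dataset_images: 24
    hypernetwork_name: example_hypernet
    learn_rate: 0.00001
    batch_size: 1
    data_root: /path/to/dataset
    training_width: 512
    training_height: 512
    steps: 10000
    preview_from_txt2img: False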
modules/hypernetworks/hypernetwork.py  +23 −2
@@ -401,7 +401,25 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
    hypernet.save(fn)

    shared.reload_hypernetworks()

# Note: textual_inversion.py has a nearly identical function of the same name.
def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # Index of the first argument saved only when preview_from_txt2img is enabled
    # (preview_from_txt2img itself is always recorded).
    border_index = 21
    # Get a list of the argument names.
    arg_names = inspect.getfullargspec(save_settings_to_file).args
    # Create a list of the argument names to include in the settings string.
    names = arg_names[:border_index]  # Include the always-saved arguments.
    if preview_from_txt2img:
        names.extend(arg_names[border_index:])  # Include preview-related arguments if applicable.
    # Build the settings string.
    settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
    for name in names:
        if name != 'log_directory': # It's useless and redundant to save log_directory.
            value = locals()[name]
            settings_str += f"{name}: {value}\n"
    # Create or append to the file.
    with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
        fout.write(settings_str + "\n\n")

def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
@@ -458,6 +476,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,

    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)

    if shared.opts.save_training_settings_to_txt:
        save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)

    latent_sampling_method = ds.latent_sampling_method

    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
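
A side note on the implementation above: rather than spelling its long parameter list out a second time, save_settings_to_file recovers the argument names from its own signature with inspect.getfullargspec and looks their values up in locals(). A minimal, self-contained sketch of that pattern (hypothetical function and values, not code from this PR):

    import inspect

    def demo_settings(learn_rate, batch_size, steps, preview_from_txt2img, preview_prompt):
        border_index = 4  # index of the first argument saved only for txt2img previews
        arg_names = inspect.getfullargspec(demo_settings).args
        names = arg_names[:border_index]
        if preview_from_txt2img:
            names.extend(arg_names[border_index:])
        values = locals()  # every parameter is present here by name
        return "".join(f"{name}: {values[name]}\n" for name in names)

    print(demo_settings(5e-5, 1, 10000, True, "a photo of a cat"))

One caveat of the trick: renaming a parameter silently changes the keys written to disk, so the file format is coupled to the function signature.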
modules/shared.py  +1 −0
@@ -362,6 +362,7 @@ options_templates.update(options_section(('training', "Training"), {
    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
    "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
    "save_training_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file whenever training starts."),
    "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
    "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
    "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
modules/textual_inversion/textual_inversion.py  +25 −1
import os
import sys
import traceback
import inspect

import torch
import tqdm
@@ -230,6 +231,26 @@ def write_loss(log_directory, filename, step, epoch_len, values):
            **values,
        })

# Note: hypernetwork.py has a nearly identical function of the same name. 
def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # Index of the first argument saved only when preview_from_txt2img is enabled
    # (preview_from_txt2img itself is always recorded).
    border_index = 18
    # Get a list of the argument names.
    arg_names = inspect.getfullargspec(save_settings_to_file).args    
    # Create a list of the argument names to include in the settings string.
    names = arg_names[:border_index]  # Include the always-saved arguments.
    if preview_from_txt2img:
        names.extend(arg_names[border_index:])  # Include preview-related arguments if applicable.
    # Build the settings string.
    settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
    for name in names:
        if name != 'log_directory': # It's useless and redundant to save log_directory.
            value = locals()[name]
            settings_str += f"{name}: {value}\n"
    # Create or append to the file.
    with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
        fout.write(settings_str + "\n\n")

def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
    assert model_name, f"{name} not selected"
    assert learn_rate, "Learning rate is empty or 0"
@@ -293,8 +314,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
    if initial_step >= steps:
        shared.state.textinfo = "Model has already been trained beyond specified max steps"
        return embedding, filename

    scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
    clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
        torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
        None
@@ -308,6 +329,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_

    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)

    if shared.opts.save_training_settings_to_txt:
        save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)

    latent_sampling_method = ds.latent_sampling_method

    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
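
Finally, as the notes on both copies point out, the two save_settings_to_file functions are near-duplicates. A possible follow-up (purely a sketch; the helper name and signature are invented, not part of this PR) would have each trainer build a name-to-value mapping and share the formatting and file I/O:

    import datetime
    import os

    # Hypothetical shared helper: callers pass only the settings they want recorded,
    # already filtered (e.g. dropping the preview_* values when previews are disabled).
    def write_settings_file(log_directory, settings):
        lines = ["datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")]
        lines += [f"{name}: {value}" for name, value in settings.items() if name != "log_directory"]
        with open(os.path.join(log_directory, "settings.txt"), "a+") as fout:
            fout.write("\n".join(lines) + "\n\n\n")

Since dicts preserve insertion order, each caller would keep the same control over field order that the positional argument lists provide today.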