Unverified Commit 9553a7e0 authored by AUTOMATIC1111's avatar AUTOMATIC1111 Committed by GitHub
Browse files

Merge pull request #3818 from jwatzman/master

Reduce peak memory usage when changing models
parents 28e6d4a5 b50ff4f4
Loading
+7 −4
Original line number Original line Diff line number Diff line
@@ -173,7 +173,9 @@ def load_model_weights(model, checkpoint_info):
            print(f"Global Step: {pl_sd['global_step']}")


        sd = get_state_dict_from_checkpoint(pl_sd)
        missing, extra = model.load_state_dict(sd, strict=False)
        del pl_sd
        model.load_state_dict(sd, strict=False)
        del sd


        if shared.cmd_opts.opt_channelslast:
            model.to(memory_format=torch.channels_last)
@@ -197,6 +199,7 @@ def load_model_weights(model, checkpoint_info):


        model.first_stage_model.to(devices.dtype_vae)


        if shared.opts.sd_checkpoint_cache > 0:
            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
            while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
                checkpoints_loaded.popitem(last=False)  # LRU