Commit da8916f9 authored by AUTOMATIC1111
Browse files

added torch.mps.empty_cache() to torch_gc()

changed a bunch of places that use torch.cuda.empty_cache() to use torch_gc() instead
parent e161b5a0
Loading
Loading
Loading
Loading
+3 −5
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ import safetensors.torch

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
-from modules import shared, sd_hijack
+from modules import shared, sd_hijack, devices

cached_ldsr_model: torch.nn.Module = None

@@ -112,8 +112,7 @@ class LDSR:


        gc.collect()
-        if torch.cuda.is_available:
-            torch.cuda.empty_cache()
+        devices.torch_gc()

        im_og = image
        width_og, height_og = im_og.size
@@ -150,8 +149,7 @@ class LDSR:

        del model
        gc.collect()
-        if torch.cuda.is_available:
-            torch.cuda.empty_cache()
+        devices.torch_gc()

        return a

+2 −2
Original line number Diff line number Diff line
@@ -85,7 +85,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):

    def do_upscale(self, img: PIL.Image.Image, selected_file):

-        torch.cuda.empty_cache()
+        devices.torch_gc()

        try:
            model = self.load_model(selected_file)
@@ -110,7 +110,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
        torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
        np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
        del torch_img, torch_output
-        torch.cuda.empty_cache()
+        devices.torch_gc()

        output = np_output.transpose((1, 2, 0))  # CHW to HWC
        output = output[:, :, ::-1]  # BGR to RGB
+1 −4
Original line number Diff line number Diff line
@@ -42,10 +42,7 @@ class UpscalerSwinIR(Upscaler):
            return img
        model = model.to(device_swinir, dtype=devices.dtype)
        img = upscale(img, model)
-        try:
-            torch.cuda.empty_cache()
-        except Exception:
-            pass
+        devices.torch_gc()
        return img

    def load_model(self, path, scale=4):
+1 −1
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ def setup_model(dirname):
                            output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
                            restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                        del output
-                        torch.cuda.empty_cache()
+                        devices.torch_gc()
                    except Exception:
                        errors.report('Failed inference for CodeFormer', exc_info=True)
                        restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
+3 −0
Original line number Diff line number Diff line
@@ -49,10 +49,13 @@ def get_device_for(task):


def torch_gc():
    """Release cached accelerator memory on the active backend (CUDA or MPS)."""

    if torch.cuda.is_available():
        # Target the configured CUDA device, not just the process default.
        # get_cuda_device_string() is defined elsewhere in this module —
        # presumably returns e.g. 'cuda:N' from user settings; confirm there.
        with torch.cuda.device(get_cuda_device_string()):
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
    elif has_mps() and hasattr(torch.mps, 'empty_cache'):
        # torch.mps.empty_cache() only exists in newer torch builds,
        # hence the hasattr guard. has_mps() is a project helper —
        # presumably detects the Apple Metal backend; verify.
        torch.mps.empty_cache()


def enable_tf32():
Loading