Commit 822597db authored by catboxanon's avatar catboxanon
Browse files

Encode batches separately

Significantly reduces VRAM.
This makes encoding more in line with how decoding currently functions.
parent da80d649
Loading
Loading
Loading
Loading
+9 −1
Original line number Diff line number Diff line
@@ -92,6 +92,14 @@ def images_tensor_to_samples(image, approximation=None, model=None):
            model = shared.sd_model
        image = image.to(shared.device, dtype=devices.dtype_vae)
        image = image * 2 - 1
        if len(image) > 1:
            x_latent = torch.stack([
                model.get_first_stage_encoding(
                    model.encode_first_stage(torch.unsqueeze(img, 0))
                )[0]
                for img in image
            ])
        else:
            x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))

    return x_latent