Unverified commit cfc9849f authored by AUTOMATIC1111, committed by GitHub

Merge branch 'master' into 6866-fix-hires-prompt-matrix

parents 5afd9e82 d99bd04b
+2 −3
@@ -104,8 +104,7 @@ Alternatively, use online services (like Google Colab):
 1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH"
 2. Install [git](https://git-scm.com/download/win).
 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
-4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
-5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
+4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.

 ### Automatic Installation on Linux
 1. Install the dependencies:
@@ -121,7 +120,7 @@ sudo pacman -S wget git python3
 ```bash
 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
 ```
-
+3. Run `webui.sh`.
 ### Installation on Apple Silicon

 Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).
+5 −5
@@ -8,8 +8,8 @@ titles = {
 	"DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
 	"DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",

-	"Batch count": "How many batches of images to create",
-	"Batch size": "How many image to create in a single batch",
+	"Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)",
+	"Batch size": "How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)",
     "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
     "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
     "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
@@ -17,7 +17,7 @@ titles = {
     "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
     "\u{1f4be}": "Save style",
-    "\U0001F5D1": "Clear prompt",
+    "\u{1f5d1}": "Clear prompt",
     "\u{1f4cb}": "Apply selected styles to current prompt",
     "\u{1f4d2}": "Paste available values into the field",
     "\u{1f3b4}": "Show extra networks",
@@ -66,8 +66,8 @@ titles = {

     "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",

-    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
-    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg],[prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
     "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",

     "Loopback": "Process an image, use it as an input, repeat.",
+1 −1
@@ -242,7 +242,7 @@ def prepare_environment():

     sys.argv += shlex.split(commandline_args)

-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(add_help=False)
     parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json')
     args, _ = parser.parse_known_args(sys.argv)


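`prepare_environment()` builds this small bootstrap parser only to read `--ui-settings-file` before the full web UI parser exists. With argparse's default `add_help=True`, the early parser would own `-h`/`--help` itself and, even under `parse_known_args`, print its one-option help text and exit before the real parser ever ran. A minimal standalone sketch of the pattern (not the project's code):

```python
import argparse
import sys

# Bootstrap parser: read one early option, leave everything else (including
# -h/--help) untouched for the full parser that is built later.
pre = argparse.ArgumentParser(add_help=False)
pre.add_argument("--ui-settings-file", type=str, default='config.json')
early_args, remaining = pre.parse_known_args(sys.argv[1:])

# The full parser, created afterwards, still owns -h/--help and prints the
# complete option list.
full = argparse.ArgumentParser()
full.add_argument("--ui-settings-file", type=str, default='config.json')
args, _ = full.parse_known_args(remaining)
```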
+7 −64
-import sys, os, shlex
+import sys
 import contextlib
 import torch
 from modules import errors
-from packaging import version
+
+if sys.platform == "darwin":
+    from modules import mac_specific


-# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
-# check `getattr` and try it for compatibility
 def has_mps() -> bool:
-    if not getattr(torch, 'has_mps', False):
-        return False
-    try:
-        torch.zeros(1).to(torch.device("mps"))
-        return True
-    except Exception:
+    if sys.platform != "darwin":
         return False
+    else:
+        return mac_specific.has_mps


 def extract_device_id(args, name):
     for x in range(len(args)):
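After this hunk, devices.py no longer probes torch for MPS support itself: non-macOS platforms short-circuit to False, and macOS delegates to modules/mac_specific, a module that is not included in this diff. A plausible sketch of the probe that module is assumed to expose, reusing the logic removed above (the module layout and the cached `has_mps` attribute are assumptions):

```python
import torch


def check_mps() -> bool:
    # MPS needs a sufficiently recent torch build on macOS 12.3+; probe it by
    # actually allocating a tensor on the device and treat any failure as "no".
    if not getattr(torch, 'has_mps', False):
        return False
    try:
        torch.zeros(1).to(torch.device("mps"))
        return True
    except Exception:
        return False


# Cached at import time so callers can read mac_specific.has_mps as a plain bool.
has_mps = check_mps()
```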
@@ -154,56 +150,3 @@ def test_for_nans(x, where):
     message += " Use --disable-nan-check commandline argument to disable this check."

     raise NansException(message)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
-orig_tensor_to = torch.Tensor.to
-def tensor_to_fix(self, *args, **kwargs):
-    if self.device.type != 'mps' and \
-       ((len(args) > 0 and isinstance(args[0], torch.device) and args[0].type == 'mps') or \
-       (isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps')):
-        self = self.contiguous()
-    return orig_tensor_to(self, *args, **kwargs)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/80800
-orig_layer_norm = torch.nn.functional.layer_norm
-def layer_norm_fix(*args, **kwargs):
-    if len(args) > 0 and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps':
-        args = list(args)
-        args[0] = args[0].contiguous()
-    return orig_layer_norm(*args, **kwargs)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/90532
-orig_tensor_numpy = torch.Tensor.numpy
-def numpy_fix(self, *args, **kwargs):
-    if self.requires_grad:
-        self = self.detach()
-    return orig_tensor_numpy(self, *args, **kwargs)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
-orig_cumsum = torch.cumsum
-orig_Tensor_cumsum = torch.Tensor.cumsum
-def cumsum_fix(input, cumsum_func, *args, **kwargs):
-    if input.device.type == 'mps':
-        output_dtype = kwargs.get('dtype', input.dtype)
-        if output_dtype == torch.int64:
-            return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
-        elif cumsum_needs_bool_fix and output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
-            return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
-    return cumsum_func(input, *args, **kwargs)
-
-
-if has_mps():
-    if version.parse(torch.__version__) < version.parse("1.13"):
-        # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
-        torch.Tensor.to = tensor_to_fix
-        torch.nn.functional.layer_norm = layer_norm_fix
-        torch.Tensor.numpy = numpy_fix
-    elif version.parse(torch.__version__) > version.parse("1.13.1"):
-        cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
-        cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
-        torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
-        torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
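These removed monkey-patches worked around known PyTorch MPS bugs by saving each original torch function and installing a wrapper in its place; with this commit they leave devices.py, presumably moving into the macOS-only mac_specific module imported above (that file is not part of this diff). A condensed sketch of the pattern, based on the removed cumsum workaround:

```python
import torch

# Keep a reference to the original so the wrapper can delegate to it.
orig_cumsum = torch.cumsum


def cumsum_mps_safe(input, *args, **kwargs):
    # pytorch/pytorch#89784: int64 cumsum misbehaves on the MPS backend, so
    # compute it on the CPU and move the result back to the original device.
    if input.device.type == 'mps' and kwargs.get('dtype', input.dtype) == torch.int64:
        return orig_cumsum(input.cpu(), *args, **kwargs).to(input.device)
    return orig_cumsum(input, *args, **kwargs)


# Installing the wrapper globally patches every caller of torch.cumsum.
torch.cumsum = cumsum_mps_safe
```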
+1 −0
 # this file is adapted from https://github.com/victorca25/iNNfer

+from collections import OrderedDict
 import math
 import functools
 import torch