Commit 75b36929 authored by papuSpartan's avatar papuSpartan
Browse files
parents f0efc8c2 abe32cef
Loading
Loading
Loading
Loading
+25 −18
Original line number Diff line number Diff line
@@ -18,22 +18,29 @@ jobs:
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.10.6
          cache: pip
          cache-dependency-path: |
            **/requirements*txt
      - name: Install PyLint
        run: | 
          python -m pip install --upgrade pip
          pip install pylint
      # This lets PyLint check to see if it can resolve imports
      - name: Install dependencies
        run: |
          export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
          python launch.py
      - name: Analysing the code with pylint
        run: |
          pylint $(git ls-files '*.py')
          python-version: 3.11
          # NB: there's no cache: pip here since we're not installing anything
          #     from the requirements.txt file(s) in the repository; it's faster
          #     not to have GHA download an (at the time of writing) 4 GB cache
          #     of PyTorch and other dependencies.
      - name: Install Ruff
        run: pip install ruff==0.0.265
      - name: Run Ruff
        run: ruff .

# The rest are currently disabled pending fixing of e.g. installing the torch dependency.

#      - name: Install PyLint
#        run: |
#          python -m pip install --upgrade pip
#          pip install pylint
#      # This lets PyLint check to see if it can resolve imports
#      - name: Install dependencies
#        run: |
#          export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
#          python launch.py
#      - name: Analysing the code with pylint
#        run: |
#          pylint $(git ls-files '*.py')
+6 −0
Original line number Diff line number Diff line
@@ -17,8 +17,14 @@ jobs:
          cache: pip
          cache-dependency-path: |
            **/requirements*txt
            launch.py
      - name: Run tests
        run: python launch.py --tests test --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
        env:
          PIP_DISABLE_PIP_VERSION_CHECK: "1"
          PIP_PROGRESS_BAR: "off"
          TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
          WEBUI_LAUNCH_LIVE_OUTPUT: "1"
      - name: Upload main app stdout-stderr
        uses: actions/upload-artifact@v3
        if: always()
+44 −0
Original line number Diff line number Diff line
## Upcoming 1.2.0

### Features:
 * do not wait for the stable diffusion model to load at startup
 * add filename patterns: [denoising]
 * directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for
 * Lora: for the `<...>` text in prompt, use name of Lora that is in the metadata of the file, if present, instead of filename (both can be used to activate lora)
 * Lora: read infotext params from kohya-ss's extension parameters if they are present and if that extension is not active
 * Lora: Fix some Loras not working (ones that have 3x3 convolution layer)
 * Lora: add an option to use old method of applying loras (producing same results as with kohya-ss)
 * add version to infotext, footer and console output when starting
 * add links to wiki for filename pattern settings
 * add extended info for quicksettings setting and use multiselect input instead of a text field

### Minor:
 * gradio bumped to 3.29.0
 * torch bumped to 2.0.1
 * --subpath option for gradio for use with reverse proxy
 * linux/OSX: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
 * possible frontend optimization: do not apply localizations if there are none
 * Add extra `None` option for VAE in XYZ plot
 * print error to console when batch processing in img2img fails
 * create HTML for extra network pages only on demand
 * allow directories starting with . to still list their models for lora, checkpoints, etc
 * put infotext options into their own category in settings tab
 * do not show licenses page when user selects Show all pages in settings

### Extensions:
 * Tooltip localization support
 * Add api method to get LoRA models with prompt

### Bug Fixes:
 * re-add /docs endpoint
 * fix gamepad navigation
 * make the lightbox fullscreen image function properly
 * fix squished thumbnails in extras tab
 * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
 * fix webui showing the same image if you configure the generation to always save results into same file
 * fix bug with upscalers not working properly
 * Fix MPS on PyTorch 2.0.1, Intel Macs
 * make it so that custom context menu from contextMenu.js only disappears after user's click, ignoring non-user click events
 * prevent Reload UI button/link from reloading the page when it's not yet ready


## 1.1.1
### Bug Fixes:
 * fix an error that prevents running webui on torch<2.0 without --disable-safe-unpickle
+6 −7
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ class LDSR:

        x_t = None
        logs = None
        for n in range(n_runs):
        for _ in range(n_runs):
            if custom_shape is not None:
                x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
@@ -110,7 +110,6 @@ class LDSR:
        diffusion_steps = int(steps)
        eta = 1.0

        down_sample_method = 'Lanczos'

        gc.collect()
        if torch.cuda.is_available:
@@ -158,7 +157,7 @@ class LDSR:


def get_cond(selected_path):
    example = dict()
    example = {}
    up_f = 4
    c = selected_path.convert('RGB')
    c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
@@ -196,7 +195,7 @@ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_s
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                              corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
    log = dict()
    log = {}

    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                        return_first_stage_outputs=True,
@@ -244,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log["sample_noquant"] = x_sample_noquant
        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
    except:
    except Exception:
        pass

    log["sample"] = x_sample
+2 −1
Original line number Diff line number Diff line
@@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
import sd_hijack_autoencoder, sd_hijack_ddpm_v1
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401


class UpscalerLDSR(Upscaler):
Loading