Commit 762265ea authored by AUTOMATIC's avatar AUTOMATIC
Browse files

autofixes from ruff

parent a617d648
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -110,7 +110,6 @@ class LDSR:
        diffusion_steps = int(steps)
        eta = 1.0

        down_sample_method = 'Lanczos'

        gc.collect()
        if torch.cuda.is_available:
+1 −1
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ class VQModel(pl.LightningModule):
    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
            self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
+7 −7
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ def decode_base64_to_image(encoding):
    try:
        image = Image.open(BytesIO(base64.b64decode(encoding)))
        return image
    except Exception as err:
    except Exception:
        raise HTTPException(status_code=500, detail="Invalid encoded image")

def encode_pil_to_base64(image):
@@ -264,11 +264,11 @@ class Api:
        if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
            for alwayson_script_name in request.alwayson_scripts.keys():
                alwayson_script = self.get_script(alwayson_script_name, script_runner)
                if alwayson_script == None:
                if alwayson_script is None:
                    raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
                # Selectable script in always on script param check
                if alwayson_script.alwayson == False:
                    raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
                if alwayson_script.alwayson is False:
                    raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
                # always on script with no arg should always run so you don't really need to add them to the requests
                if "args" in request.alwayson_scripts[alwayson_script_name]:
                    # min between arg length in scriptrunner and arg length in the request
@@ -310,7 +310,7 @@ class Api:
            p.outpath_samples = opts.outdir_txt2img_samples

            shared.state.begin()
            if selectable_scripts != None:
            if selectable_scripts is not None:
                p.script_args = script_args
                processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
            else:
@@ -367,7 +367,7 @@ class Api:
            p.outpath_samples = opts.outdir_img2img_samples

            shared.state.begin()
            if selectable_scripts != None:
            if selectable_scripts is not None:
                p.script_args = script_args
                processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
            else:
@@ -642,7 +642,7 @@ class Api:
                    sd_hijack.apply_optimizations()
                shared.state.end()
            return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
        except AssertionError as msg:
        except AssertionError:
            shared.state.end()
            return TrainResponse(info=f"train embedding error: {error}")

+2 −2
Original line number Diff line number Diff line
@@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
    result_is_instruct_pix2pix_model = False

    if theta_func2:
        shared.state.textinfo = f"Loading B"
        shared.state.textinfo = "Loading B"
        print(f"Loading {secondary_model_info.filename}...")
        theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
    else:
        theta_1 = None

    if theta_func1:
        shared.state.textinfo = f"Loading C"
        shared.state.textinfo = "Loading C"
        print(f"Loading {tertiary_model_info.filename}...")
        theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')

+2 −2
Original line number Diff line number Diff line
@@ -409,13 +409,13 @@ class FilenameGenerator:
        time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
        try:
            time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
        except pytz.exceptions.UnknownTimeZoneError as _:
        except pytz.exceptions.UnknownTimeZoneError:
            time_zone = None

        time_zone_time = time_datetime.astimezone(time_zone)
        try:
            formatted_time = time_zone_time.strftime(time_format)
        except (ValueError, TypeError) as _:
        except (ValueError, TypeError):
            formatted_time = time_zone_time.strftime(self.default_time_format)

        return sanitize_filename_part(formatted_time, replace_spaces=False)
Loading