Unverified Commit d6a9b22c authored by AUTOMATIC1111, committed by GitHub
Browse files

Merge pull request #10232 from akx/eff

Fix up string formatting/concatenation to f-strings where feasible
parents ccbb3618 3ba6c3c8
Loading
Loading
Loading
Loading
+11 −11
Original line number Diff line number Diff line
@@ -570,20 +570,20 @@ class Api:
            filename = create_embedding(**args) # create empty embedding
            sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
            shared.state.end()
            return CreateResponse(info = "create embedding filename: {filename}".format(filename = filename))
            return CreateResponse(info=f"create embedding filename: {filename}")
        except AssertionError as e:
            shared.state.end()
            return TrainResponse(info = "create embedding error: {error}".format(error = e))
            return TrainResponse(info=f"create embedding error: {e}")

    def create_hypernetwork(self, args: dict):
        try:
            shared.state.begin()
            filename = create_hypernetwork(**args) # create empty embedding
            shared.state.end()
            return CreateResponse(info = "create hypernetwork filename: {filename}".format(filename = filename))
            return CreateResponse(info=f"create hypernetwork filename: {filename}")
        except AssertionError as e:
            shared.state.end()
            return TrainResponse(info = "create hypernetwork error: {error}".format(error = e))
            return TrainResponse(info=f"create hypernetwork error: {e}")

    def preprocess(self, args: dict):
        try:
@@ -593,13 +593,13 @@ class Api:
            return PreprocessResponse(info = 'preprocess complete')
        except KeyError as e:
            shared.state.end()
            return PreprocessResponse(info = "preprocess error: invalid token: {error}".format(error = e))
            return PreprocessResponse(info=f"preprocess error: invalid token: {e}")
        except AssertionError as e:
            shared.state.end()
            return PreprocessResponse(info = "preprocess error: {error}".format(error = e))
            return PreprocessResponse(info=f"preprocess error: {e}")
        except FileNotFoundError as e:
            shared.state.end()
            return PreprocessResponse(info = 'preprocess error: {error}'.format(error = e))
            return PreprocessResponse(info=f'preprocess error: {e}')

    def train_embedding(self, args: dict):
        try:
@@ -617,10 +617,10 @@ class Api:
                if not apply_optimizations:
                    sd_hijack.apply_optimizations()
                shared.state.end()
            return TrainResponse(info = "train embedding complete: filename: {filename} error: {error}".format(filename = filename, error = error))
            return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
        except AssertionError as msg:
            shared.state.end()
            return TrainResponse(info = "train embedding error: {msg}".format(msg = msg))
            return TrainResponse(info=f"train embedding error: {msg}")

    def train_hypernetwork(self, args: dict):
        try:
@@ -641,10 +641,10 @@ class Api:
                if not apply_optimizations:
                    sd_hijack.apply_optimizations()
                shared.state.end()
            return TrainResponse(info="train embedding complete: filename: {filename} error: {error}".format(filename=filename, error=error))
            return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
        except AssertionError as msg:
            shared.state.end()
            return TrainResponse(info="train embedding error: {error}".format(error=error))
            return TrainResponse(info=f"train embedding error: {error}")

    def get_memory(self):
        try:
+3 −2
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
            max_debug_str_len = 131072 # (1024*1024)/8

            print("Error completing request", file=sys.stderr)
            argStr = f"Arguments: {str(args)} {str(kwargs)}"
            argStr = f"Arguments: {args} {kwargs}"
            print(argStr[:max_debug_str_len], file=sys.stderr)
            if len(argStr) > max_debug_str_len:
                print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)
@@ -73,7 +73,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
            if extra_outputs_array is None:
                extra_outputs_array = [None, '']

            res = extra_outputs_array + [f"<div class='error'>{html.escape(type(e).__name__+': '+str(e))}</div>"]
            error_message = f'{type(e).__name__}: {e}'
            res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]

        shared.state.skipped = False
        shared.state.interrupted = False
+7 −4
Original line number Diff line number Diff line
@@ -156,13 +156,16 @@ class UpscalerESRGAN(Upscaler):

    def load_model(self, path: str):
        if "http" in path:
            filename = load_file_from_url(url=self.model_url, model_dir=self.model_path,
                                          file_name="%s.pth" % self.model_name,
                                          progress=True)
            filename = load_file_from_url(
                url=self.model_url,
                model_dir=self.model_path,
                file_name=f"{self.model_name}.pth",
                progress=True,
            )
        else:
            filename = path
        if not os.path.exists(filename) or filename is None:
            print("Unable to load %s from %s" % (self.model_path, filename))
            print(f"Unable to load {self.model_path} from {filename}")
            return None

        state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
+8 −8
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@ class RRDBNet(nn.Module):
        elif upsample_mode == 'pixelshuffle':
            upsample_block = pixelshuffle_block
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
            raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found')
        if upscale == 3:
            upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype)
        else:
@@ -261,10 +261,10 @@ class Upsample(nn.Module):

    def extra_repr(self):
        if self.scale_factor is not None:
            info = 'scale_factor=' + str(self.scale_factor)
            info = f'scale_factor={self.scale_factor}'
        else:
            info = 'size=' + str(self.size)
        info += ', mode=' + self.mode
            info = f'size={self.size}'
        info += f', mode={self.mode}'
        return info


@@ -350,7 +350,7 @@ def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0):
    elif act_type == 'sigmoid':  # [0, 1] range output
        layer = nn.Sigmoid()
    else:
        raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))
        raise NotImplementedError(f'activation layer [{act_type}] is not found')
    return layer


@@ -372,7 +372,7 @@ def norm(norm_type, nc):
    elif norm_type == 'none':
        def norm_layer(x): return Identity()
    else:
        raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type))
        raise NotImplementedError(f'normalization layer [{norm_type}] is not found')
    return layer


@@ -388,7 +388,7 @@ def pad(pad_type, padding):
    elif pad_type == 'zero':
        layer = nn.ZeroPad2d(padding)
    else:
        raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))
        raise NotImplementedError(f'padding layer [{pad_type}] is not implemented')
    return layer


@@ -432,7 +432,7 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=
               pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D',
               spectral_norm=False):
    """ Conv layer with padding, normalization, activation """
    assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
    assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]'
    padding = get_valid_padding(kernel_size, dilation)
    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
    padding = padding if pad_type == 'zero' else 0
+2 −1
Original line number Diff line number Diff line
@@ -10,7 +10,8 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
        additional = shared.opts.sd_hypernetwork

        if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0:
            p.all_prompts = [x + f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>"
            p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

        names = []
Loading