Commit 05933840 authored by AUTOMATIC's avatar AUTOMATIC
Browse files

rename print_error to report, and use it together with the package name (errors.report)

parent d67ef01f
Loading
Loading
Loading
Loading
+2 −3
Original line number Original line Diff line number Diff line
@@ -2,10 +2,9 @@ import os


from basicsr.utils.download_util import load_file_from_url
from basicsr.utils.download_util import load_file_from_url


from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
from modules import shared, script_callbacks, errors
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401


@@ -51,7 +50,7 @@ class UpscalerLDSR(Upscaler):
        try:
        try:
            return LDSR(model, yaml)
            return LDSR(model, yaml)
        except Exception:
        except Exception:
            print_error("Error importing LDSR", exc_info=True)
            errors.report("Error importing LDSR", exc_info=True)
        return None
        return None


    def do_upscale(self, img, path):
    def do_upscale(self, img, path):
+2 −3
Original line number Original line Diff line number Diff line
@@ -9,10 +9,9 @@ from tqdm import tqdm
from basicsr.utils.download_util import load_file_from_url
from basicsr.utils.download_util import load_file_from_url


import modules.upscaler
import modules.upscaler
from modules import devices, modelloader, script_callbacks
from modules import devices, modelloader, script_callbacks, errors
from scunet_model_arch import SCUNet as net
from scunet_model_arch import SCUNet as net


from modules.errors import print_error
from modules.shared import opts
from modules.shared import opts




@@ -39,7 +38,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
                scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                scalers.append(scaler_data)
                scalers.append(scaler_data)
            except Exception:
            except Exception:
                print_error(f"Error loading ScuNET model: {file}", exc_info=True)
                errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
        if add_model2:
        if add_model2:
            scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
            scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
            scalers.append(scaler_data2)
            scalers.append(scaler_data2)
+2 −3
Original line number Original line Diff line number Diff line
@@ -14,9 +14,8 @@ from fastapi.encoders import jsonable_encoder
from secrets import compare_digest
from secrets import compare_digest


import modules.shared as shared
import modules.shared as shared
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors
from modules.api import models
from modules.api import models
from modules.errors import print_error
from modules.shared import opts
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
@@ -145,7 +144,7 @@ def api_middleware(app: FastAPI):
                print(message)
                print(message)
                console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
                console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
            else:
            else:
                print_error(message, exc_info=True)
                errors.report(message, exc_info=True)
        return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
        return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))


    @app.middleware("http")
    @app.middleware("http")
+2 −3
Original line number Original line Diff line number Diff line
@@ -2,8 +2,7 @@ import html
import threading
import threading
import time
import time


from modules import shared, progress
from modules import shared, progress, errors
from modules.errors import print_error


queue_lock = threading.Lock()
queue_lock = threading.Lock()


@@ -62,7 +61,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
            arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
            arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
            if len(arg_str) > max_debug_str_len:
            if len(arg_str) > max_debug_str_len:
                arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
                arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
            print_error(f"{message}\n{arg_str}", exc_info=True)
            errors.report(f"{message}\n{arg_str}", exc_info=True)


            shared.state.job = ""
            shared.state.job = ""
            shared.state.job_count = 0
            shared.state.job_count = 0
+3 −4
Original line number Original line Diff line number Diff line
@@ -5,8 +5,7 @@ import torch


import modules.face_restoration
import modules.face_restoration
import modules.shared
import modules.shared
from modules import shared, devices, modelloader
from modules import shared, devices, modelloader, errors
from modules.errors import print_error
from modules.paths import models_path
from modules.paths import models_path


# codeformer people made a choice to include modified basicsr library to their project which makes
# codeformer people made a choice to include modified basicsr library to their project which makes
@@ -105,7 +104,7 @@ def setup_model(dirname):
                        del output
                        del output
                        torch.cuda.empty_cache()
                        torch.cuda.empty_cache()
                    except Exception:
                    except Exception:
                        print_error('Failed inference for CodeFormer', exc_info=True)
                        errors.report('Failed inference for CodeFormer', exc_info=True)
                        restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
                        restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))


                    restored_face = restored_face.astype('uint8')
                    restored_face = restored_face.astype('uint8')
@@ -134,6 +133,6 @@ def setup_model(dirname):
        shared.face_restorers.append(codeformer)
        shared.face_restorers.append(codeformer)


    except Exception:
    except Exception:
        print_error("Error setting up CodeFormer", exc_info=True)
        errors.report("Error setting up CodeFormer", exc_info=True)


   # sys.path = stored_sys_path
   # sys.path = stored_sys_path
Loading